diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..97845797410cdd09cb665665981bdf035ef258e3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,33 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/comfyui_controlnet_aux/examples/example_mesh_graphormer.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mesh_graphormer/hand_landmarker.task filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/presets.jpg filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10[[:space:]]steps[[:space:]]SDXL[[:space:]]AYS[[:space:]]Warp[[:space:]]drive[[:space:]]variation.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/11728UI_00001_.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12[[:space:]]steps[[:space:]]SDXL[[:space:]]AYS[[:space:]]Warp[[:space:]]drive[[:space:]]workflow.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12steps.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/24steps.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/attention_modifiers_explainations.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/00382UI_00001_.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/01207UI_00001_.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/01217UI_00001_.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/a[[:space:]]bad[[:space:]]upscale[[:space:]]looks[[:space:]]like[[:space:]]low[[:space:]]quality[[:space:]]jpeg.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/another[[:space:]]bad[[:space:]]upscale[[:space:]]looking[[:space:]]like[[:space:]]jpeg.png filter=lfs diff=lfs merge=lfs -text 
+ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/intradasting.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/laule.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/niiiiiice.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/special[[:space:]]double[[:space:]]pass.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/web.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/My[[:space:]]current[[:space:]]go-to[[:space:]]settings.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/potato[[:space:]]attention[[:space:]]guidance.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/simple[[:space:]]SD[[:space:]]upscale.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/Start_by_this_one.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/was-node-suite-comfyui/repos/SAM/assets/masks1.png filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/was-node-suite-comfyui/repos/SAM/assets/minidemo.gif filter=lfs diff=lfs merge=lfs -text +ComfyUI/custom_nodes/was-node-suite-comfyui/repos/SAM/assets/notebook2.png filter=lfs diff=lfs merge=lfs -text diff --git a/ComfyUI/custom_nodes/ComfyMath/LICENSE b/ComfyUI/custom_nodes/ComfyMath/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..29f81d812f3e768fa89638d1f72920dbfd1413a8 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ComfyUI/custom_nodes/ComfyMath/README.md b/ComfyUI/custom_nodes/ComfyMath/README.md new file mode 100644 index 0000000000000000000000000000000000000000..79cc6847911b3c50a688938eee630c3bd2240a3a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/README.md @@ -0,0 +1,19 @@ +# ComfyMath + +Provides Math Nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) + +## Features + +Provides nodes for: +* Boolean Logic +* Integer Arithmetic +* Floating Point Arithmetic and Functions +* Vec2, Vec3, and Vec4 Arithmetic and Functions + +## Installation + +From the `custom_nodes` directory in your ComfyUI installation, run: + +```sh +git clone https://github.com/evanspearman/ComfyMath.git +``` diff --git a/ComfyUI/custom_nodes/ComfyMath/__init__.py b/ComfyUI/custom_nodes/ComfyMath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b198c77638cf894d0611984e5a04c3e0792f6f52 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/__init__.py @@ -0,0 +1,31 @@ +from .src.comfymath.convert import NODE_CLASS_MAPPINGS as convert_NCM +from .src.comfymath.bool import NODE_CLASS_MAPPINGS as bool_NCM +from .src.comfymath.int import NODE_CLASS_MAPPINGS as int_NCM +from .src.comfymath.float import NODE_CLASS_MAPPINGS as float_NCM +from .src.comfymath.number import NODE_CLASS_MAPPINGS as number_NCM +from .src.comfymath.vec import NODE_CLASS_MAPPINGS as vec_NCM +from .src.comfymath.control import NODE_CLASS_MAPPINGS as control_NCM +from .src.comfymath.graphics import NODE_CLASS_MAPPINGS as graphics_NCM + + + + +NODE_CLASS_MAPPINGS = { + **convert_NCM, + **bool_NCM, + **int_NCM, + **float_NCM, + **number_NCM, + **vec_NCM, + **control_NCM, + **graphics_NCM, +} + + +def remove_cm_prefix(node_mapping: str) -> str: + if node_mapping.startswith("CM_"): + return node_mapping[3:] + return node_mapping + + +NODE_DISPLAY_NAME_MAPPINGS = {key: remove_cm_prefix(key) for key in NODE_CLASS_MAPPINGS} diff --git a/ComfyUI/custom_nodes/ComfyMath/__pycache__/__init__.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bf23b9ec95f884badf238810d410c47220f82c7 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/__pycache__/__init__.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/pyproject.toml b/ComfyUI/custom_nodes/ComfyMath/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..259939cff8a66c95c570deecb3ff705e3919ef89 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "comfymath" +version = "0.1.0" +description = "Math nodes for ComfyUI" +authors = ["Evan Spearman "] +license = "Apache-2.0" +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.10" +numpy = "^1.25.1" + +[tool.poetry.group.dev.dependencies] +mypy = "^1.4.1" +black = "^23.7.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/ComfyUI/custom_nodes/ComfyMath/requirements.txt b/ComfyUI/custom_nodes/ComfyMath/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..bffa19baa5e48ec4bc61eb39a4c79e48e6d9d845 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/requirements.txt @@ -0,0 +1 @@ +numpy diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__init__.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff 
--git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/__init__.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d10be83e44eb6a54ef4b4fabfb274f8fe0fb7378 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/__init__.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/bool.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/bool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7827b64f200c3f76bab39cb63899804183e345d1 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/bool.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/control.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/control.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb69d879ad41cfeda8b25f435f16be7438928b11 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/control.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/convert.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17b5bc52b59b61b18e6a37bcd7fd2234b8d7103b Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/convert.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/float.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/float.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f18623c27f9d45eec190c6431fc7f33a8a18f655 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/float.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/graphics.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/graphics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8837350a77b225d8e1b7dd09ae3a13326f693834 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/graphics.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/int.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/int.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70edf5cda1e5123a99b10911acd7df51a2168fb2 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/int.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/number.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/number.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f4d729e819bab679cae56411fe2b040a9eb3760 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/number.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/vec.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8ac54680bd37142803a9d210ef785c4f3d93fa9 Binary files /dev/null and 
b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/__pycache__/vec.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/bool.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/bool.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf740f28597527f9fa730fa9e52e207b140e30d --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/bool.py @@ -0,0 +1,59 @@ +from typing import Any, Callable, Mapping + +DEFAULT_BOOL = ("BOOL", {"default": False}) + + +BOOL_UNARY_OPERATIONS: Mapping[str, Callable[[bool], bool]] = { + "Not": lambda a: not a, +} + +BOOL_BINARY_OPERATIONS: Mapping[str, Callable[[bool, bool], bool]] = { + "Nor": lambda a, b: not (a or b), + "Xor": lambda a, b: a ^ b, + "Nand": lambda a, b: not (a and b), + "And": lambda a, b: a and b, + "Xnor": lambda a, b: not (a ^ b), + "Or": lambda a, b: a or b, + "Eq": lambda a, b: a == b, + "Neq": lambda a, b: a != b, +} + + +class BoolUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": {"op": (list(BOOL_UNARY_OPERATIONS.keys()),), "a": DEFAULT_BOOL} + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/bool" + + def op(self, op: str, a: bool) -> tuple[bool]: + return (BOOL_UNARY_OPERATIONS[op](a),) + + +class BoolBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(BOOL_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_BOOL, + "b": DEFAULT_BOOL, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/bool" + + def op(self, op: str, a: bool, b: bool) -> tuple[bool]: + return (BOOL_BINARY_OPERATIONS[op](a, b),) + + +NODE_CLASS_MAPPINGS = { + "CM_BoolUnaryOperation": BoolUnaryOperation, + "CM_BoolBinaryOperation": BoolBinaryOperation, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/control.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/control.py new file mode 100644 index 0000000000000000000000000000000000000000..a46764f374c0b6f5ab7bcc0574919c2ac4920be0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/control.py @@ -0,0 +1,3 @@ +from typing import Any, Mapping + +NODE_CLASS_MAPPINGS: Mapping[str, Any] = {} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/convert.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..c93ee5470f9f96e93bc63138ca55904dced80134 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/convert.py @@ -0,0 +1,273 @@ +from typing import Any, Mapping + +from .vec import Vec2, VEC2_ZERO, Vec3, VEC3_ZERO, Vec4, VEC4_ZERO +from .number import number + + +class BoolToInt: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("BOOL", {"default": False})}} + + RETURN_TYPES = ("INT",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: bool) -> tuple[int]: + return (int(a),) + + +class IntToBool: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("INT", {"default": 0})}} + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: int) -> tuple[bool]: + return (a != 0,) + + +class FloatToInt: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("FLOAT", {"default": 0.0})}} + + RETURN_TYPES = ("INT",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: float) -> tuple[int]: + return (int(a),) + + +class IntToFloat: + @classmethod + def 
INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("INT", {"default": 0})}} + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: int) -> tuple[float]: + return (float(a),) + + +class IntToNumber: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("INT", {"default": 0})}} + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: int) -> tuple[number]: + return (a,) + + +class NumberToInt: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("NUMBER", {"default": 0.0})}} + + RETURN_TYPES = ("INT",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: number) -> tuple[int]: + return (int(a),) + + +class FloatToNumber: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("FLOAT", {"default": 0.0})}} + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: float) -> tuple[number]: + return (a,) + + +class NumberToFloat: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("NUMBER", {"default": 0.0})}} + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: number) -> tuple[float]: + return (float(a),) + + +class ComposeVec2: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "x": ("FLOAT", {"default": 0.0}), + "y": ("FLOAT", {"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC2",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, x: float, y: float) -> tuple[Vec2]: + return ((x, y),) + + +class FillVec2: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "a": ("FLOAT", {"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC2",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: float) -> tuple[Vec2]: + return ((a, a),) + + +class BreakoutVec2: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("VEC2", {"default": VEC2_ZERO})}} + + RETURN_TYPES = ("FLOAT", "FLOAT") + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: Vec2) -> tuple[float, float]: + return (a[0], a[1]) + + +class ComposeVec3: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "x": ("FLOAT", {"default": 0.0}), + "y": ("FLOAT", {"default": 0.0}), + "z": ("FLOAT", {"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC3",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, x: float, y: float, z: float) -> tuple[Vec3]: + return ((x, y, z),) + + +class FillVec3: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "a": ("FLOAT", {"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC3",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: float) -> tuple[Vec3]: + return ((a, a, a),) + + +class BreakoutVec3: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("VEC3", {"default": VEC3_ZERO})}} + + RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT") + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: Vec3) -> tuple[float, float, float]: + return (a[0], a[1], a[2]) + + +class ComposeVec4: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "x": ("FLOAT", {"default": 0.0}), + "y": ("FLOAT", {"default": 0.0}), + "z": ("FLOAT", {"default": 0.0}), + "w": ("FLOAT", 
{"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC4",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, x: float, y: float, z: float, w: float) -> tuple[Vec4]: + return ((x, y, z, w),) + + +class FillVec4: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "a": ("FLOAT", {"default": 0.0}), + } + } + + RETURN_TYPES = ("VEC4",) + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: float) -> tuple[Vec4]: + return ((a, a, a, a),) + + +class BreakoutVec4: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {"required": {"a": ("VEC4", {"default": VEC4_ZERO})}} + + RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "FLOAT") + FUNCTION = "op" + CATEGORY = "math/conversion" + + def op(self, a: Vec4) -> tuple[float, float, float, float]: + return (a[0], a[1], a[2], a[3]) + + +NODE_CLASS_MAPPINGS = { + "CM_BoolToInt": BoolToInt, + "CM_IntToBool": IntToBool, + "CM_FloatToInt": FloatToInt, + "CM_IntToFloat": IntToFloat, + "CM_IntToNumber": IntToNumber, + "CM_NumberToInt": NumberToInt, + "CM_FloatToNumber": FloatToNumber, + "CM_NumberToFloat": NumberToFloat, + "CM_ComposeVec2": ComposeVec2, + "CM_ComposeVec3": ComposeVec3, + "CM_ComposeVec4": ComposeVec4, + "CM_BreakoutVec2": BreakoutVec2, + "CM_BreakoutVec3": BreakoutVec3, + "CM_BreakoutVec4": BreakoutVec4, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/float.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/float.py new file mode 100644 index 0000000000000000000000000000000000000000..8f542db654a41c08a2afce2c82125a52280731c4 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/float.py @@ -0,0 +1,159 @@ +import math + +from typing import Any, Callable, Mapping + +DEFAULT_FLOAT = ("FLOAT", {"default": 0.0}) + +FLOAT_UNARY_OPERATIONS: Mapping[str, Callable[[float], float]] = { + "Neg": lambda a: -a, + "Inc": lambda a: a + 1, + "Dec": lambda a: a - 1, + "Abs": lambda a: abs(a), + "Sqr": lambda a: a * a, + "Cube": lambda a: a * a * a, + "Sqrt": lambda a: math.sqrt(a), + "Exp": lambda a: math.exp(a), + "Ln": lambda a: math.log(a), + "Log10": lambda a: math.log10(a), + "Log2": lambda a: math.log2(a), + "Sin": lambda a: math.sin(a), + "Cos": lambda a: math.cos(a), + "Tan": lambda a: math.tan(a), + "Asin": lambda a: math.asin(a), + "Acos": lambda a: math.acos(a), + "Atan": lambda a: math.atan(a), + "Sinh": lambda a: math.sinh(a), + "Cosh": lambda a: math.cosh(a), + "Tanh": lambda a: math.tanh(a), + "Asinh": lambda a: math.asinh(a), + "Acosh": lambda a: math.acosh(a), + "Atanh": lambda a: math.atanh(a), + "Round": lambda a: round(a), + "Floor": lambda a: math.floor(a), + "Ceil": lambda a: math.ceil(a), + "Trunc": lambda a: math.trunc(a), + "Erf": lambda a: math.erf(a), + "Erfc": lambda a: math.erfc(a), + "Gamma": lambda a: math.gamma(a), + "Radians": lambda a: math.radians(a), + "Degrees": lambda a: math.degrees(a), +} + +FLOAT_UNARY_CONDITIONS: Mapping[str, Callable[[float], bool]] = { + "IsZero": lambda a: a == 0.0, + "IsPositive": lambda a: a > 0.0, + "IsNegative": lambda a: a < 0.0, + "IsNonZero": lambda a: a != 0.0, + "IsPositiveInfinity": lambda a: math.isinf(a) and a > 0.0, + "IsNegativeInfinity": lambda a: math.isinf(a) and a < 0.0, + "IsNaN": lambda a: math.isnan(a), + "IsFinite": lambda a: math.isfinite(a), + "IsInfinite": lambda a: math.isinf(a), + "IsEven": lambda a: a % 2 == 0.0, + "IsOdd": lambda a: a % 2 != 0.0, +} + +FLOAT_BINARY_OPERATIONS: Mapping[str, Callable[[float, float], float]] = { + "Add": lambda a, b: a + b, + "Sub": lambda a, b: 
a - b, + "Mul": lambda a, b: a * b, + "Div": lambda a, b: a / b, + "Mod": lambda a, b: a % b, + "Pow": lambda a, b: a**b, + "FloorDiv": lambda a, b: a // b, + "Max": lambda a, b: max(a, b), + "Min": lambda a, b: min(a, b), + "Log": lambda a, b: math.log(a, b), + "Atan2": lambda a, b: math.atan2(a, b), +} + +FLOAT_BINARY_CONDITIONS: Mapping[str, Callable[[float, float], bool]] = { + "Eq": lambda a, b: a == b, + "Neq": lambda a, b: a != b, + "Gt": lambda a, b: a > b, + "Gte": lambda a, b: a >= b, + "Lt": lambda a, b: a < b, + "Lte": lambda a, b: a <= b, +} + + +class FloatUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_UNARY_OPERATIONS.keys()),), + "a": DEFAULT_FLOAT, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/float" + + def op(self, op: str, a: float) -> tuple[float]: + return (FLOAT_UNARY_OPERATIONS[op](a),) + + +class FloatUnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_UNARY_CONDITIONS.keys()),), + "a": DEFAULT_FLOAT, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/float" + + def op(self, op: str, a: float) -> tuple[bool]: + return (FLOAT_UNARY_CONDITIONS[op](a),) + + +class FloatBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_FLOAT, + "b": DEFAULT_FLOAT, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/float" + + def op(self, op: str, a: float, b: float) -> tuple[float]: + return (FLOAT_BINARY_OPERATIONS[op](a, b),) + + +class FloatBinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_FLOAT, + "b": DEFAULT_FLOAT, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/float" + + def op(self, op: str, a: float, b: float) -> tuple[bool]: + return (FLOAT_BINARY_CONDITIONS[op](a, b),) + + +NODE_CLASS_MAPPINGS = { + "CM_FloatUnaryOperation": FloatUnaryOperation, + "CM_FloatUnaryCondition": FloatUnaryCondition, + "CM_FloatBinaryOperation": FloatBinaryOperation, + "CM_FloatBinaryCondition": FloatBinaryCondition, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/graphics.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/graphics.py new file mode 100644 index 0000000000000000000000000000000000000000..157418b24c74f1f29a19ff56a4c75b2802e80903 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/graphics.py @@ -0,0 +1,77 @@ +from typing import Any, Mapping + + +SDXL_SUPPORTED_RESOLUTIONS = [ + (1024, 1024, 1.0), + (1152, 896, 1.2857142857142858), + (896, 1152, 0.7777777777777778), + (1216, 832, 1.4615384615384615), + (832, 1216, 0.6842105263157895), + (1344, 768, 1.75), + (768, 1344, 0.5714285714285714), + (1536, 640, 2.4), + (640, 1536, 0.4166666666666667), +] + + +class SDXLResolution: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "resolution": ( + [f"{res[0]}x{res[1]}" for res in SDXL_SUPPORTED_RESOLUTIONS], + ) + } + } + + RETURN_TYPES = ("INT", "INT") + RETURN_NAMES = ("width", "height") + FUNCTION = "op" + CATEGORY = "math/graphics" + + def op(self, resolution: str) -> tuple[int, int]: + width, height = resolution.split("x") + return (int(width), int(height)) + + +class NearestSDXLResolution: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: 
+ return {"required": {"image": ("IMAGE",)}} + + RETURN_TYPES = ("INT", "INT") + RETURN_NAMES = ("width", "height") + FUNCTION = "op" + CATEGORY = "math/graphics" + + def op(self, image) -> tuple[int, int]: + image_width = image.size()[2] + image_height = image.size()[1] + print(f"Input image resolution: {image_width}x{image_height}") + image_ratio = image_width / image_height + differences = [ + (abs(image_ratio - resolution[2]), resolution) + for resolution in SDXL_SUPPORTED_RESOLUTIONS + ] + smallest = None + for difference in differences: + if smallest is None: + smallest = difference + else: + if difference[0] < smallest[0]: + smallest = difference + if smallest is not None: + width = smallest[1][0] + height = smallest[1][1] + else: + width = 1024 + height = 1024 + print(f"Selected SDXL resolution: {width}x{height}") + return (width, height) + + +NODE_CLASS_MAPPINGS = { + "CM_SDXLResolution": SDXLResolution, + "CM_NearestSDXLResolution": NearestSDXLResolution, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/int.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/int.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c1c6c3d1898e1461ecae3317e73f8029d3c088 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/int.py @@ -0,0 +1,129 @@ +import math + +from typing import Any, Callable, Mapping + +DEFAULT_INT = ("INT", {"default": 0}) + +INT_UNARY_OPERATIONS: Mapping[str, Callable[[int], int]] = { + "Abs": lambda a: abs(a), + "Neg": lambda a: -a, + "Inc": lambda a: a + 1, + "Dec": lambda a: a - 1, + "Sqr": lambda a: a * a, + "Cube": lambda a: a * a * a, + "Not": lambda a: ~a, + "Factorial": lambda a: math.factorial(a), +} + +INT_UNARY_CONDITIONS: Mapping[str, Callable[[int], bool]] = { + "IsZero": lambda a: a == 0, + "IsNonZero": lambda a: a != 0, + "IsPositive": lambda a: a > 0, + "IsNegative": lambda a: a < 0, + "IsEven": lambda a: a % 2 == 0, + "IsOdd": lambda a: a % 2 == 1, +} + +INT_BINARY_OPERATIONS: Mapping[str, Callable[[int, int], int]] = { + "Add": lambda a, b: a + b, + "Sub": lambda a, b: a - b, + "Mul": lambda a, b: a * b, + "Div": lambda a, b: a // b, + "Mod": lambda a, b: a % b, + "Pow": lambda a, b: a**b, + "And": lambda a, b: a & b, + "Nand": lambda a, b: ~(a & b), + "Or": lambda a, b: a | b, + "Nor": lambda a, b: ~(a | b), + "Xor": lambda a, b: a ^ b, + "Xnor": lambda a, b: ~a ^ b, + "Shl": lambda a, b: a << b, + "Shr": lambda a, b: a >> b, + "Max": lambda a, b: max(a, b), + "Min": lambda a, b: min(a, b), +} + +INT_BINARY_CONDITIONS: Mapping[str, Callable[[int, int], bool]] = { + "Eq": lambda a, b: a == b, + "Neq": lambda a, b: a != b, + "Gt": lambda a, b: a > b, + "Lt": lambda a, b: a < b, + "Geq": lambda a, b: a >= b, + "Leq": lambda a, b: a <= b, +} + + +class IntUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": {"op": (list(INT_UNARY_OPERATIONS.keys()),), "a": DEFAULT_INT} + } + + RETURN_TYPES = ("INT",) + FUNCTION = "op" + CATEGORY = "math/int" + + def op(self, op: str, a: int) -> tuple[int]: + return (INT_UNARY_OPERATIONS[op](a),) + + +class IntUnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": {"op": (list(INT_UNARY_CONDITIONS.keys()),), "a": DEFAULT_INT} + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/int" + + def op(self, op: str, a: int) -> tuple[bool]: + return (INT_UNARY_CONDITIONS[op](a),) + + +class IntBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return {
"required": { + "op": (list(INT_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_INT, + "b": DEFAULT_INT, + } + } + + RETURN_TYPES = ("INT",) + FUNCTION = "op" + CATEGORY = "math/int" + + def op(self, op: str, a: int, b: int) -> tuple[int]: + return (INT_BINARY_OPERATIONS[op](a, b),) + + +class IntBinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(INT_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_INT, + "b": DEFAULT_INT, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/int" + + def op(self, op: str, a: int, b: int) -> tuple[bool]: + return (INT_BINARY_CONDITIONS[op](a, b),) + + +NODE_CLASS_MAPPINGS = { + "CM_IntUnaryOperation": IntUnaryOperation, + "CM_IntUnaryCondition": IntUnaryCondition, + "CM_IntBinaryOperation": IntBinaryOperation, + "CM_IntBinaryCondition": IntBinaryCondition, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/number.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/number.py new file mode 100644 index 0000000000000000000000000000000000000000..8d34ecd78019ce6e9874de5da05eb0eee461070f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/number.py @@ -0,0 +1,95 @@ +from dataclasses import dataclass +from typing import Any, Callable, Mapping, TypeAlias + +from .float import ( + FLOAT_UNARY_OPERATIONS, + FLOAT_UNARY_CONDITIONS, + FLOAT_BINARY_OPERATIONS, + FLOAT_BINARY_CONDITIONS, +) + +DEFAULT_NUMBER = ("NUMBER", {"default": 0.0}) + +number: TypeAlias = int | float + + +class NumberUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_UNARY_OPERATIONS.keys()),), + "a": DEFAULT_NUMBER, + } + } + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "op" + CATEGORY = "math/number" + + def op(self, op: str, a: number) -> tuple[float]: + return (FLOAT_UNARY_OPERATIONS[op](float(a)),) + + +class NumberUnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_UNARY_CONDITIONS.keys()),), + "a": DEFAULT_NUMBER, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/number" + + def op(self, op: str, a: number) -> tuple[bool]: + return (FLOAT_UNARY_CONDITIONS[op](float(a)),) + + +class NumberBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_NUMBER, + "b": DEFAULT_NUMBER, + } + } + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "op" + CATEGORY = "math/number" + + def op(self, op: str, a: number, b: number) -> tuple[float]: + return (FLOAT_BINARY_OPERATIONS[op](float(a), float(b)),) + + +class NumberBinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(FLOAT_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_NUMBER, + "b": DEFAULT_NUMBER, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/float" + + def op(self, op: str, a: number, b: number) -> tuple[bool]: + return (FLOAT_BINARY_CONDITIONS[op](float(a), float(b)),) + + +NODE_CLASS_MAPPINGS = { + "CM_NumberUnaryOperation": NumberUnaryOperation, + "CM_NumberUnaryCondition": NumberUnaryCondition, + "CM_NumberBinaryOperation": NumberBinaryOperation, + "CM_NumberBinaryCondition": NumberBinaryCondition, +} diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/py.typed b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/py.typed new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ComfyUI/custom_nodes/ComfyMath/src/comfymath/vec.py b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/vec.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6ccc556c12596379a8d8e0ecf1190330fb20bd --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyMath/src/comfymath/vec.py @@ -0,0 +1,501 @@ +import numpy + +from typing import Any, Callable, Mapping, TypeAlias + +Vec2: TypeAlias = tuple[float, float] +VEC2_ZERO = (0.0, 0.0) +DEFAULT_VEC2 = ("VEC2", {"default": VEC2_ZERO}) + +Vec3: TypeAlias = tuple[float, float, float] +VEC3_ZERO = (0.0, 0.0, 0.0) +DEFAULT_VEC3 = ("VEC3", {"default": VEC3_ZERO}) + +Vec4: TypeAlias = tuple[float, float, float, float] +VEC4_ZERO = (0.0, 0.0, 0.0, 0.0) +DEFAULT_VEC4 = ("VEC4", {"default": VEC4_ZERO}) + +VEC_UNARY_OPERATIONS: Mapping[str, Callable[[numpy.ndarray], numpy.ndarray]] = { + "Neg": lambda a: -a, + "Normalize": lambda a: a / numpy.linalg.norm(a), +} + +VEC_TO_SCALAR_UNARY_OPERATION: Mapping[str, Callable[[numpy.ndarray], float]] = { + "Norm": lambda a: numpy.linalg.norm(a).astype(float), +} + +VEC_UNARY_CONDITIONS: Mapping[str, Callable[[numpy.ndarray], bool]] = { + "IsZero": lambda a: not numpy.any(a).astype(bool), + "IsNotZero": lambda a: numpy.any(a).astype(bool), + "IsNormalized": lambda a: numpy.allclose(a, a / numpy.linalg.norm(a)), + "IsNotNormalized": lambda a: not numpy.allclose(a, a / numpy.linalg.norm(a)), +} + +VEC_BINARY_OPERATIONS: Mapping[ + str, Callable[[numpy.ndarray, numpy.ndarray], numpy.ndarray] +] = { + "Add": lambda a, b: a + b, + "Sub": lambda a, b: a - b, + "Cross": lambda a, b: numpy.cross(a, b), +} + +VEC_TO_SCALAR_BINARY_OPERATION: Mapping[ + str, Callable[[numpy.ndarray, numpy.ndarray], float] +] = { + "Dot": lambda a, b: numpy.dot(a, b), + "Distance": lambda a, b: numpy.linalg.norm(a - b).astype(float), +} + +VEC_BINARY_CONDITIONS: Mapping[str, Callable[[numpy.ndarray, numpy.ndarray], bool]] = { + "Eq": lambda a, b: numpy.allclose(a, b), + "Neq": lambda a, b: not numpy.allclose(a, b), +} + +VEC_SCALAR_OPERATION: Mapping[str, Callable[[numpy.ndarray, float], numpy.ndarray]] = { + "Mul": lambda a, b: a * b, + "Div": lambda a, b: a / b, +} + + +def _vec2_from_numpy(a: numpy.ndarray) -> Vec2: + return ( + float(a[0]), + float(a[1]), + ) + + +def _vec3_from_numpy(a: numpy.ndarray) -> Vec3: + return ( + float(a[0]), + float(a[1]), + float(a[2]), + ) + + +def _vec4_from_numpy(a: numpy.ndarray) -> Vec4: + return ( + float(a[0]), + float(a[1]), + float(a[2]), + float(a[3]), + ) + + +class Vec2UnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("VEC2",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2) -> tuple[Vec2]: + return (_vec2_from_numpy(VEC_UNARY_OPERATIONS[op](numpy.array(a))),) + + +class Vec2ToScalarUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_UNARY_OPERATION.keys()),), + "a": DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2) -> tuple[float]: + return (VEC_TO_SCALAR_UNARY_OPERATION[op](numpy.array(a)),) + + +class Vec2UnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_CONDITIONS.keys()),), + "a": 
DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2) -> tuple[bool]: + return (VEC_UNARY_CONDITIONS[op](numpy.array(a)),) + + +class Vec2BinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC2, + "b": DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("VEC2",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2, b: Vec2) -> tuple[Vec2]: + return ( + _vec2_from_numpy(VEC_BINARY_OPERATIONS[op](numpy.array(a), numpy.array(b))), + ) + + +class Vec2ToScalarBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_BINARY_OPERATION.keys()),), + "a": DEFAULT_VEC2, + "b": DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2, b: Vec2) -> tuple[float]: + return (VEC_TO_SCALAR_BINARY_OPERATION[op](numpy.array(a), numpy.array(b)),) + + +class Vec2BinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_VEC2, + "b": DEFAULT_VEC2, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2, b: Vec2) -> tuple[bool]: + return (VEC_BINARY_CONDITIONS[op](numpy.array(a), numpy.array(b)),) + + +class Vec2ScalarOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_SCALAR_OPERATION.keys()),), + "a": DEFAULT_VEC2, + "b": ("FLOAT",), + } + } + + RETURN_TYPES = ("VEC2",) + FUNCTION = "op" + CATEGORY = "math/vec2" + + def op(self, op: str, a: Vec2, b: float) -> tuple[Vec2]: + return (_vec2_from_numpy(VEC_SCALAR_OPERATION[op](numpy.array(a), b)),) + + +class Vec3UnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("VEC3",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3) -> tuple[Vec3]: + return (_vec3_from_numpy(VEC_UNARY_OPERATIONS[op](numpy.array(a))),) + + +class Vec3ToScalarUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_UNARY_OPERATION.keys()),), + "a": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3) -> tuple[float]: + return (VEC_TO_SCALAR_UNARY_OPERATION[op](numpy.array(a)),) + + +class Vec3UnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_CONDITIONS.keys()),), + "a": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3) -> tuple[bool]: + return (VEC_UNARY_CONDITIONS[op](numpy.array(a)),) + + +class Vec3BinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC3, + "b": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("VEC3",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3, b: Vec3) -> tuple[Vec3]: + return ( + _vec3_from_numpy(VEC_BINARY_OPERATIONS[op](numpy.array(a), numpy.array(b))), + ) + + +class 
Vec3ToScalarBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_BINARY_OPERATION.keys()),), + "a": DEFAULT_VEC3, + "b": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3, b: Vec3) -> tuple[float]: + return (VEC_TO_SCALAR_BINARY_OPERATION[op](numpy.array(a), numpy.array(b)),) + + +class Vec3BinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_VEC3, + "b": DEFAULT_VEC3, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3, b: Vec3) -> tuple[bool]: + return (VEC_BINARY_CONDITIONS[op](numpy.array(a), numpy.array(b)),) + + +class Vec3ScalarOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_SCALAR_OPERATION.keys()),), + "a": DEFAULT_VEC3, + "b": ("FLOAT",), + } + } + + RETURN_TYPES = ("VEC3",) + FUNCTION = "op" + CATEGORY = "math/vec3" + + def op(self, op: str, a: Vec3, b: float) -> tuple[Vec3]: + return (_vec3_from_numpy(VEC_SCALAR_OPERATION[op](numpy.array(a), b)),) + + +class Vec4UnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC4, + } + } + + RETURN_TYPES = ("VEC4",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4) -> tuple[Vec4]: + return (_vec4_from_numpy(VEC_UNARY_OPERATIONS[op](numpy.array(a))),) + + +class Vec4ToScalarUnaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_UNARY_OPERATION.keys()),), + "a": DEFAULT_VEC4, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4) -> tuple[float]: + return (VEC_TO_SCALAR_UNARY_OPERATION[op](numpy.array(a)),) + + +class Vec4UnaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_UNARY_CONDITIONS.keys()),), + "a": DEFAULT_VEC4, + } + } + + RETURN_TYPES = ("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4) -> tuple[bool]: + return (VEC_UNARY_CONDITIONS[op](numpy.array(a)),) + + +class Vec4BinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_OPERATIONS.keys()),), + "a": DEFAULT_VEC4, + "b": DEFAULT_VEC4, + } + } + + RETURN_TYPES = ("VEC4",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4, b: Vec4) -> tuple[Vec4]: + return ( + _vec4_from_numpy(VEC_BINARY_OPERATIONS[op](numpy.array(a), numpy.array(b))), + ) + + +class Vec4ToScalarBinaryOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_TO_SCALAR_BINARY_OPERATION.keys()),), + "a": DEFAULT_VEC4, + "b": DEFAULT_VEC4, + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4, b: Vec4) -> tuple[float]: + return (VEC_TO_SCALAR_BINARY_OPERATION[op](numpy.array(a), numpy.array(b)),) + + +class Vec4BinaryCondition: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_BINARY_CONDITIONS.keys()),), + "a": DEFAULT_VEC4, + "b": DEFAULT_VEC4, + } + } + + RETURN_TYPES = 
("BOOL",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4, b: Vec4) -> tuple[bool]: + return (VEC_BINARY_CONDITIONS[op](numpy.array(a), numpy.array(b)),) + + +class Vec4ScalarOperation: + @classmethod + def INPUT_TYPES(cls) -> Mapping[str, Any]: + return { + "required": { + "op": (list(VEC_SCALAR_OPERATION.keys()),), + "a": DEFAULT_VEC4, + "b": ("FLOAT",), + } + } + + RETURN_TYPES = ("VEC4",) + FUNCTION = "op" + CATEGORY = "math/vec4" + + def op(self, op: str, a: Vec4, b: float) -> tuple[Vec4]: + return (_vec4_from_numpy(VEC_SCALAR_OPERATION[op](numpy.array(a), b)),) + + +NODE_CLASS_MAPPINGS = { + "CM_Vec2UnaryOperation": Vec2UnaryOperation, + "CM_Vec2UnaryCondition": Vec2UnaryCondition, + "CM_Vec2ToScalarUnaryOperation": Vec2ToScalarUnaryOperation, + "CM_Vec2BinaryOperation": Vec2BinaryOperation, + "CM_Vec2BinaryCondition": Vec2BinaryCondition, + "CM_Vec2ToScalarBinaryOperation": Vec2ToScalarBinaryOperation, + "CM_Vec2ScalarOperation": Vec2ScalarOperation, + "CM_Vec3UnaryOperation": Vec3UnaryOperation, + "CM_Vec3UnaryCondition": Vec3UnaryCondition, + "CM_Vec3ToScalarUnaryOperation": Vec3ToScalarUnaryOperation, + "CM_Vec3BinaryOperation": Vec3BinaryOperation, + "CM_Vec3BinaryCondition": Vec3BinaryCondition, + "CM_Vec3ToScalarBinaryOperation": Vec3ToScalarBinaryOperation, + "CM_Vec3ScalarOperation": Vec3ScalarOperation, + "CM_Vec4UnaryOperation": Vec4UnaryOperation, + "CM_Vec4UnaryCondition": Vec4UnaryCondition, + "CM_Vec4ToScalarUnaryOperation": Vec4ToScalarUnaryOperation, + "CM_Vec4BinaryOperation": Vec4BinaryOperation, + "CM_Vec4BinaryCondition": Vec4BinaryCondition, + "CM_Vec4ToScalarBinaryOperation": Vec4ToScalarBinaryOperation, + "CM_Vec4ScalarOperation": Vec4ScalarOperation, +} diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/README.md b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dd5c29bfd0a84310ad9132be98983175f42d801b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/README.md @@ -0,0 +1,175 @@ +# Abandon this boat and jump on [this one!](https://github.com/Extraltodeus/Skimmed_CFG) + +If you liked other functionnalities, I've re-created most in [this](https://github.com/Extraltodeus/pre_cfg_comfy_nodes_for_ComfyUI) repository. + +# In short: + +My own version "from scratch" of a self-rescaling CFG / anti-burn. It ain't much but it's honest work. + +No more burns and 160% faster gens with the warp drive node. + +Now includes custom attention modifiers and interesting presets as well as temperature scaling. + +Also just tested and it works with pixart sigma. + +Works with SD3 for as long as you don't use any boost feature / cutting the uncond (it's the same thing). 20 steps works nicely. + +# Note: + +The presets are interpreted with eval(). Make sure that you thrust whoever sent a preset to you as it may be used to execute malicious code. + +# Update: + +- Removed and perfected "Uncond Zero" node and moved it to it's [own repository](https://github.com/Extraltodeus/Uncond-Zero-for-ComfyUI/tree/main) +- Removed temperature nodes and set a [repository](https://github.com/Extraltodeus/Stable-Diffusion-temperature-settings) for these + +# Usage: + +![77889aa6-a2f6-48bf-8cde-17c9cbfda5fa](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/c725a06c-8966-43de-ab1c-569e2ff5b151) + + +### That's it! + +- The "boost" toggle will turn off the negative guidance when the sigmas are near 1. This doubles the inference speed. 
+# Just a note:
+
+Your CFG won't be your CFG anymore. It is turned into a way to guide the CFG/final intensity/brightness/saturation. So don't hesitate to change your habits while trying!
+
+# The rest of the explanation:
+
+While this node is connected, it will turn your sampler's CFG scale into something else.
+This method works by rescaling the CFG at each step, evaluating the potential average min/max values and aiming at a desired output intensity (by intensity I mean overall brightness/saturation/sharpness).
+The base intensity has been arbitrarily chosen by me, and your sampler's CFG scale will make this target vary.
+I have set the "central" CFG at 8, meaning that at 4 you will aim at half of the desired range while at 16 it will be doubled. This makes it feel slightly like the usual when you're around the normal values.
+
+Since the CFG behavior during sampling is set automatically for each channel, it behaves differently and therefore gives different outputs than usual.
+From my observations, printing the results while testing, it seems to go from around 16 at the beginning to something like 4 near the middle, ending up near ~7.
+These values might have changed since then, as I've done a thousand tests in different ways, but that's to give you an idea; it's just me eyeballing the CLI's output.
+
+I use the mean of the upper and lower 25% top-k values as a reference to have some margin of maneuver.
+
+It makes the sampling generate overall better-quality images. I get far fewer artifacts, if any, and my more creative prompts also tend to give more random (in a good way), varied results.
+
+I attribute this more random yet positive behavior to the fact that the scale seems to start high and then, as it becomes lower, it self-corrects and improvises, taking advantage of the sampling process a lot more.
+
+It is dead simple to use and made sampling more fun from my perspective :)
+
+You will find it in the model_patches category.
+
+TLDR: set your CFG at 8 to try it. No burned images and artifacts anymore. CFG is also a bit more sensitive because it's a proportion around 8.
+
+A low scale like 4 also gives really nice results since your CFG is not the CFG anymore.
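+
+Below is a rough sketch of the rescaling idea, simplified from the actual node (single image, "hard" mode, no batch handling; the function names are mine, not the node's):
+
+```python
+import torch
+
+def channel_range(chan, top_k=0.25):
+    # mean of the top 25% values and mean of the absolute bottom 25% values
+    flat = chan.flatten()
+    k = max(1, int(flat.numel() * top_k))
+    high = torch.topk(flat, k, largest=True).values.mean()
+    low = torch.topk(flat, k, largest=False).values.abs().mean()
+    return ((high + low) / 2).item()
+
+def rescaled_cfg(x, cond_pred, uncond_pred, cfg_scale, reference_cfg=8.0):
+    target_intensity = cfg_scale / 10  # the "central" CFG of 8 aims at 0.8
+    cond, uncond = x - cond_pred, x - uncond_pred
+    # probe the result at the reference scale, then correct each channel
+    probe = x - (uncond + reference_cfg * (cond - uncond))
+    out = torch.empty_like(probe)
+    for c in range(probe.shape[0]):
+        scale = reference_cfg * target_intensity / channel_range(probe[c])
+        out[c] = uncond[c] + scale * (cond[c] - uncond[c])
+    return out
+```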
+# Updates:
+
+Updated:
+- Up to 28.5% faster generation speed than normal
+- Negative weighting
+
+05.04.24:
+
+- Updated to the latest ComfyUI version. If you get an error: update your ComfyUI.
+
+15.04.24
+
+- ~~Added a "no uncond" node which completely disables the negative and doubles the speed while rescaling the latent space in the post-cfg function up until the sigmas are at 1 (or really, 6.86%). By itself it is not perfect and I'm searching for solutions to improve the final result. It seems to work better with dpmpp3m_sde/exponential if you're not using anything else. If you are using the PAG node then you don't need to care about the sampler but will generate at a normal speed. The results will simply be different (I personally like them).~~ Use the warp drive instead.
+- To use the [PAG node](https://github.com/pamparamm/sd-perturbed-attention/tree/master) ~~without the complete slow-down (if using the no-uncond node) or at least take advantage of the boost feature:~~
+  ~~- in the "pag_nodes.py" file look for "disable_cfg1_optimization=True"~~
+  ~~- set it to "disable_cfg1_optimization=False".~~ This is not necessary anymore because the dev modified it already :)
+- For the negative lerp function in the other nodes the scale has been divided by two. So if you were using it at 10, set it to 5.
+
+16.04.24
+
+- Added "uncond_start_percentage" as an experimental feature. This allows starting the guidance later, as a way to try [Applying Guidance in a Limited Interval Improves Sample and Distribution Quality in Diffusion Models](https://arxiv.org/pdf/2404.07724.pdf). A more accurate implementation [can be found here](https://github.com/ericbeyer/guidance_interval) :)
+
+17.04.24
+
+- reworked the advanced node and cleaned it up
+- added timing on every option
+- added a post-rescale node which helps fight deep-fried images a bit more in some special cases
+- added a tweaked version of the Comfy SAG node with start/end sliders
+- changed the start/end sliders: they now relate directly to the sigma values and are not percentages anymore. ⚠
+
+01.05.24
+
+- Actually working disabled uncond
+- Added a "warp drive" preset to test it out simply.
+
+03.05.24
+
+- Allows unpatching `turn off the negative` by removing or disconnecting the node.
+- added the "Warp drive" node. It uses a new method of my own cooking which uses the previous step to determine a negative, cutting the generation time by half for approx. 3/4 of the steps.
+- added example workflows with 10-12 steps, but of course you can do more steps if needed. The goal is not only to do fewer steps in general but also to show it is compatible.
+
+14.05.24:
+- fixed the little mem leak 😀
+- temporarily disabled the timed SAG node because an update broke it.
+- added node: **preset loader**. Can do what the others can and much more, like modifying the attention mechanisms! Mostly tested on SDXL 😀!
+  - Some presets are slower than others, just like the perturbed attention guidance for example. Most are just as fast.
+  - About some of the presets:
+    - For SD 1.5 "crossed conds customized 3" seems amazing!
+    - "Enhanced_details_and_tweaked_attention" works better on creative generations and less so on characters.
+    - "Reinforced_style" does not regulate the CFG, gives MUCH MORE importance to your negative prompt, works with 12 steps and is slightly slower.
+    - "The red riding latent" only works with SDXL. It is an almost nonsensical mix of attention tweaks. Best with 12 steps and really nice with creative prompts. It has a tendency to give more red clothing to the characters, hence the name.
+    - "Excellent_attention" holds the default settings for the node described below. Don't delete it or the node won't work. 🙃
+    - "Potato Attention Guidance" is really nice for portraits of happy people...
+    - There are a bunch of others. I've generated examples which you can find in the example grids folder.
+    - Most of these have been tested on SDXL. I have very little idea of their effect on SD 1.5.
+  - The presets are .json files and can contain a string which will go through eval(). ⚠
+    - Always check what is inside before running it when it comes from someone else! I hesitated to share a preset which would plan a shutdown in 60 seconds, named "actually shut down the computer in one minute", to make you aware, but that would bother more than it would help.
+- added node: "**Excellent attention**", developed by myself and based on this [astonishingly easy to understand research paper!](https://github.com/Extraltodeus/temp/blob/main/very_science.jpg) But in short:
+  - Just try it. [Do it](https://www.youtube.com/watch?v=ZXsQAXx_ao0).
+  - This node allows disabling input layer 8 on self and cross attention.
+  - But also applying a custom modification on cross attention middle layer 0. The "patch_cond" and "patch_uncond" toggles are about this modification.
+  - While the modification is definitely not very resource-costly, the light patch uses less VRAM.
+  - The multiplier influences the cross attention and reinforces prompt-following. But like, for real. Works better with the "light patch" toggle ON.
+  - I have ~~only~~ mostly tested it with SDXL.
+  - You can find a grid example of this node's settings in the "grids_example" folder.
+  - For some reason the Juggernaut model does not work with it and I have no idea why.
+- Customizable attention modifiers:
+  - Check the ["attention_modifiers_explainations"](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/blob/main/workflows/attention_modifiers_explainations.png) in the workflows. 👀 It is basically a tutorial.
+  - Experiment with what each layer really does by using what is basically a bruteforcing node (the Attention modifiers tester node)!
+  - This is how you do a [Perturbed Attention Guidance](https://github.com/Extraltodeus/temp/blob/main/PAG.png) for example (see the sketch below).
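+
+A Perturbed Attention Guidance style perturbation boils down to replacing the self-attention output with its value tensor (identity attention). With the modifier nodes that is roughly the following settings (assumed here for illustration; check the tutorial image linked above for the exact ones):
+
+```
+self_attn_mod_eval:    v
+unet_block_id_middle:  0
+unet_attn:             attn1
+```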
+
+
+# Examples
+
+### 10 steps with only 2 having the negative enabled. So ~170% faster. 2.5 seconds on an RTX 4070
+
+![03640UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/673cb47a-095f-4ebb-a186-2f6a49ffd2e1)
+
+### cherry-picked 24 steps with the uncond fully disabled (these images are also workflows):
+
+
+![03619UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/19ee6edc-b039-4472-9ec2-c08ea15dd908)
+
+![03621UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/52695e1c-d28e-427f-9109-7ee4e4b3a5f6)
+
+![03604UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/ca391b46-f587-43da-98da-a87e4982e4ed)
+
+
+
+# Pro tip:
+
+Did you know that my first activity is to write creative model merging functions?
+
+While the code is too much of a mess to be shared, I do expose and share my models. You can find them in this [gallery](https://github.com/Extraltodeus/shared_models_galleries)! 😁
+
+
+-----
+
+Thanks to ComfyUI for existing and making such things so simple!
+ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__init__.py b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c361931650367ff6bbfa64cff6ea13df35e27c --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__init__.py @@ -0,0 +1,27 @@ +from .nodes import * +# from .experimental_temperature import ExperimentalTemperaturePatchSDXL,ExperimentalTemperaturePatchSD15,CLIPTemperaturePatch,CLIPTemperaturePatchDual +# from .nodes_sag_custom import * + +NODE_CLASS_MAPPINGS = { + "Automatic CFG": simpleDynamicCFG, + "Automatic CFG - Negative": simpleDynamicCFGlerpUncond, + "Automatic CFG - Warp Drive": simpleDynamicCFGwarpDrive, + "Automatic CFG - Preset Loader": presetLoader, + "Automatic CFG - Excellent attention": simpleDynamicCFGExcellentattentionPatch, + "Automatic CFG - Advanced": advancedDynamicCFG, + "Automatic CFG - Post rescale only": postCFGrescaleOnly, + "Automatic CFG - Custom attentions": simpleDynamicCFGCustomAttentionPatch, + "Automatic CFG - Attention modifiers": attentionModifierParametersNode, + "Automatic CFG - Attention modifiers tester": attentionModifierBruteforceParametersNode, + "Automatic CFG - Unpatch function": simpleDynamicCFGunpatch, + # "Zero Uncond CFG - standalone patch (incompatible with the others)":uncondZeroNode, + # "Temperature settings SDXL": ExperimentalTemperaturePatchSDXL, + # "Temperature settings SD 1.5": ExperimentalTemperaturePatchSD15, + # "Temperature settings CLIP": CLIPTemperaturePatch, + # "Temperature separate settings CLIP SDXL": CLIPTemperaturePatchDual, + # "SAG delayed activation": SelfAttentionGuidanceCustom, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "Automatic CFG - Unpatch function": "Automatic CFG - Unpatch function(Deprecated)", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/__init__.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04ba8413e516e74925f62ef584aa6790f6795f42 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/__init__.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/experimental_temperature.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/experimental_temperature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63787bc034918b46d2d2c637ae0e64a76bc3fad0 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/experimental_temperature.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07e2b40a8b6e8d77607a6026e7137f8e3a730c84 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/__pycache__/nodes.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/experimental_temperature.py b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/experimental_temperature.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f3c4391041ac0a5d675af92d92251bb387f97a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/experimental_temperature.py @@ -0,0 +1,208 @@ +import torch +from torch import nn, einsum +from einops import rearrange, repeat +import torch.nn.functional as F +import math +from comfy import 
model_management +import types +import os + +def exists(val): + return val is not None + +# better than a division by 0 hey +abs_mean = lambda x: torch.where(torch.isnan(x) | torch.isinf(x), torch.zeros_like(x), x).abs().mean() + +class temperature_patcher(): + def __init__(self, temperature, layer_name="None"): + self.temperature = temperature + self.layer_name = layer_name + + # taken from comfy.ldm.modules + def attention_basic_with_temperature(self, q, k, v, extra_options, mask=None, attn_precision=None): + if isinstance(extra_options, int): + heads = extra_options + else: + heads = extra_options['n_heads'] + + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + + h = heads + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) + + # force cast to fp32 to avoid overflowing + if attn_precision == torch.float32: + sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale + else: + sim = einsum('b i d, b j d -> b i j', q, k) * scale + + del q, k + + if exists(mask): + if mask.dtype == torch.bool: + mask = rearrange(mask, 'b ... -> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + else: + if len(mask.shape) == 2: + bs = 1 + else: + bs = mask.shape[0] + mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1]) + sim.add_(mask) + + # attention, what we cannot get enough of + sim = sim.div(self.temperature if self.temperature > 0 else abs_mean(sim)).softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) + out = ( + out.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) + return out + +layers_SD15 = { + "input":[1,2,4,5,7,8], + "middle":[0], + "output":[3,4,5,6,7,8,9,10,11], +} + +layers_SDXL = { + "input":[4,5,7,8], + "middle":[0], + "output":[0,1,2,3,4,5], +} + +class ExperimentalTemperaturePatch: + @classmethod + def INPUT_TYPES(s): + required_inputs = {f"{key}_{layer}": ("BOOLEAN", {"default": False}) for key, layers in s.TOGGLES.items() for layer in layers} + required_inputs["model"] = ("MODEL",) + required_inputs["Temperature"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": 0.01}) + required_inputs["Attention"] = (["both","self","cross"],) + return {"required": required_inputs} + + TOGGLES = {} + RETURN_TYPES = ("MODEL","STRING",) + RETURN_NAMES = ("Model","String",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches" + + def patch(self, model, Temperature, Attention, **kwargs): + m = model.clone() + levels = ["input","middle","output"] + parameters_output = {level:[] for level in levels} + for key, toggle_enabled in kwargs.items(): + current_level = key.split("_")[0] + if current_level in levels and toggle_enabled: + b_number = int(key.split("_")[1]) + parameters_output[current_level].append(b_number) + patcher = temperature_patcher(Temperature,key) + + if Attention in ["both","self"]: + m.set_model_attn1_replace(patcher.attention_basic_with_temperature, current_level, b_number) + if Attention in ["both","cross"]: + m.set_model_attn2_replace(patcher.attention_basic_with_temperature, current_level, b_number) + + parameters_as_string = "\n".join(f"{k}: {','.join(map(str, v))}" for k, v in parameters_output.items()) + 
parameters_as_string = f"Temperature: {Temperature}\n{parameters_as_string}\nAttention: {Attention}" + return (m, parameters_as_string,) + +ExperimentalTemperaturePatchSDXL = type("ExperimentalTemperaturePatch_SDXL", (ExperimentalTemperaturePatch,), {"TOGGLES": layers_SDXL}) +ExperimentalTemperaturePatchSD15 = type("ExperimentalTemperaturePatch_SD15", (ExperimentalTemperaturePatch,), {"TOGGLES": layers_SD15}) + +class CLIPTemperaturePatch: + @classmethod + def INPUT_TYPES(cls): + return {"required": { "clip": ("CLIP",), + "Temperature": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + + RETURN_TYPES = ("CLIP",) + FUNCTION = "patch" + CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches" + + def patch(self, clip, Temperature): + def custom_optimized_attention(device, mask=None, small_input=True): + return temperature_patcher(Temperature).attention_basic_with_temperature + + def new_forward(self, x, mask=None, intermediate_output=None): + optimized_attention = custom_optimized_attention(x.device, mask=mask is not None, small_input=True) + + if intermediate_output is not None: + if intermediate_output < 0: + intermediate_output = len(self.layers) + intermediate_output + + intermediate = None + for i, l in enumerate(self.layers): + x = l(x, mask, optimized_attention) + if i == intermediate_output: + intermediate = x.clone() + return x, intermediate + + m = clip.clone() + + clip_encoder_instance = m.cond_stage_model.clip_l.transformer.text_model.encoder + clip_encoder_instance.forward = types.MethodType(new_forward, clip_encoder_instance) + + if getattr(m.cond_stage_model, f"clip_g", None) is not None: + clip_encoder_instance_g = m.cond_stage_model.clip_g.transformer.text_model.encoder + clip_encoder_instance_g.forward = types.MethodType(new_forward, clip_encoder_instance_g) + + return (m,) + +class CLIPTemperaturePatchDual: + @classmethod + def INPUT_TYPES(cls): + return {"required": { "clip": ("CLIP",), + "Temperature": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "CLIP_Model": (["clip_g","clip_l","both"],), + }} + + RETURN_TYPES = ("CLIP",) + FUNCTION = "patch" + CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches" + + def patch(self, clip, Temperature, CLIP_Model): + def custom_optimized_attention(device, mask=None, small_input=True): + return temperature_patcher(Temperature, "CLIP").attention_basic_with_temperature + + def new_forward(self, x, mask=None, intermediate_output=None): + optimized_attention = custom_optimized_attention(x.device, mask=mask is not None, small_input=True) + + if intermediate_output is not None: + if intermediate_output < 0: + intermediate_output = len(self.layers) + intermediate_output + + intermediate = None + for i, l in enumerate(self.layers): + x = l(x, mask, optimized_attention) + if i == intermediate_output: + intermediate = x.clone() + return x, intermediate + + m = clip.clone() + + if CLIP_Model in ["clip_l","both"]: + clip_encoder_instance = m.cond_stage_model.clip_l.transformer.text_model.encoder + clip_encoder_instance.forward = types.MethodType(new_forward, clip_encoder_instance) + + if CLIP_Model in ["clip_g","both"]: + if getattr(m.cond_stage_model, f"clip_g", None) is not None: + clip_encoder_instance_g = m.cond_stage_model.clip_g.transformer.text_model.encoder + clip_encoder_instance_g.forward = types.MethodType(new_forward, clip_encoder_instance_g) + + return (m,) diff --git 
a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png new file mode 100644 index 0000000000000000000000000000000000000000..44eada5bac397ded38360d75d35db54559b2e815 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23dbd409ff9526382892b0395c8889bce4afb4f16b2df8a4918abfcb375b2f3a +size 1315710 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png new file mode 100644 index 0000000000000000000000000000000000000000..d8c4aec15472d3e070315ba1d44d3573e9fc745a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f36c78b64e07f85975e9dcb57e6a17787a3c95ffddc89b393c01c70864ed95e4 +size 1158746 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7a5ca135a9c922a9ae6751f5061826f0fcc41a2 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c54fad2902563d00fc5f8529a22bff7e85f82f787578cfb787b729e4f64040e3 +size 1661135 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4c14dfdb1df9212e7d648ff449a173bbd5f68b6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:317817674e6b2a8754c953a1adc403cc43793098e8316367b7e9db66653034ee +size 1614187 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/presets.jpg b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/presets.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0922c7c38c4d322e0667e7f67268ebf5dc2b3a30 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/grids_example/presets.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9afb639bf225151a4c4c9dc5a349b83c56d21fc88a43303e37b611579680f125 +size 1419522 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes.py b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..3a619c1e1c3981923eddaadceaad9168ee471238 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes.py @@ -0,0 +1,1292 @@ +import math +from copy import deepcopy +from torch.nn import Upsample +import comfy.model_management as model_management +from comfy.model_patcher import set_model_options_patch_replace +from comfy.ldm.modules.attention import attention_basic, attention_xformers, attention_pytorch, attention_split, attention_sub_quad, optimized_attention_for_device +from .experimental_temperature import temperature_patcher +import comfy.samplers +import comfy.utils +import numpy as np +import torch 
+import torch.nn.functional as F +from colorama import Fore, Style +import json +import os +import random +import base64 + +original_sampling_function = None +current_dir = os.path.dirname(os.path.realpath(__file__)) +json_preset_path = os.path.join(current_dir, 'presets') +attnfunc = optimized_attention_for_device(model_management.get_torch_device()) +check_string = "UEFUUkVPTi50eHQ=" +support_string = b'CgoKClRoYW5rIHlvdSBmb3IgdXNpbmcgbXkgbm9kZXMhCgpJZiB5b3UgZW5qb3kgaXQsIHBsZWFzZSBjb25zaWRlciBzdXBwb3J0aW5nIG1lIG9uIFBhdHJlb24gdG8ga2VlcCB0aGUgbWFnaWMgZ29pbmchCgpWaXNpdDoKCmh0dHBzOi8vd3d3LnBhdHJlb24uY29tL2V4dHJhbHRvZGV1cwoKCgo=' + +def support_function(): + if base64.b64decode(check_string).decode('utf8') not in os.listdir(current_dir): + print(base64.b64decode(check_string).decode('utf8')) + print(base64.b64decode(support_string).decode('utf8')) + +def sampling_function_patched(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None, **kwargs): + + cond_copy = cond + uncond_copy = uncond + + for fn in model_options.get("sampler_patch_model_pre_cfg_function", []): + args = {"model": model, "sigma": timestep, "model_options": model_options} + model, model_options = fn(args) + + if "sampler_pre_cfg_automatic_cfg_function" in model_options: + uncond, cond, cond_scale = model_options["sampler_pre_cfg_automatic_cfg_function"]( + sigma=timestep, uncond=uncond, cond=cond, cond_scale=cond_scale + ) + + if math.isclose(cond_scale, 1.0) and model_options.get("disable_cfg1_optimization", False) == False: + uncond_ = None + else: + uncond_ = uncond + + conds = [cond, uncond_] + + out = comfy.samplers.calc_cond_batch(model, conds, x, timestep, model_options) + + for fn in model_options.get("sampler_pre_cfg_function", []): + args = {"conds":conds, "conds_out": out, "cond_scale": cond_scale, "timestep": timestep, + "input": x, "sigma": timestep, "model": model, "model_options": model_options} + out = fn(args) + + cond_pred = out[0] + uncond_pred = out[1] + + if "sampler_cfg_function" in model_options: + args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep, + "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options, "cond_pos": cond_copy, "cond_neg": uncond_copy} + cfg_result = x - model_options["sampler_cfg_function"](args) + else: + cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale + + for fn in model_options.get("sampler_post_cfg_function", []): + args = {"denoised": cfg_result, "cond": cond_copy, "uncond": uncond_copy, "model": model, "uncond_denoised": uncond_pred, "cond_denoised": cond_pred, + "sigma": timestep, "model_options": model_options, "input": x} + cfg_result = fn(args) + + return cfg_result + +def monkey_patching_comfy_sampling_function(): + global original_sampling_function + + if original_sampling_function is None: + original_sampling_function = comfy.samplers.sampling_function + # Make sure to only patch once + if hasattr(comfy.samplers.sampling_function, '_automatic_cfg_decorated'): + return + comfy.samplers.sampling_function = sampling_function_patched + comfy.samplers.sampling_function._automatic_cfg_decorated = True # flag to check monkey patch + +def make_sampler_pre_cfg_automatic_cfg_function(minimum_sigma_to_disable_uncond=0, maximum_sigma_to_enable_uncond=1000000, disabled_cond_start=10000,disabled_cond_end=10000): + def sampler_pre_cfg_automatic_cfg_function(sigma, uncond, cond, cond_scale, **kwargs): + if sigma[0] 
< minimum_sigma_to_disable_uncond or sigma[0] > maximum_sigma_to_enable_uncond: + uncond = None + if sigma[0] <= disabled_cond_start and sigma[0] > disabled_cond_end: + cond = None + return uncond, cond, cond_scale + return sampler_pre_cfg_automatic_cfg_function + +def get_entropy(tensor): + hist = np.histogram(tensor.cpu(), bins=100)[0] + hist = hist / hist.sum() + hist = hist[hist > 0] + return -np.sum(hist * np.log2(hist)) + +def map_sigma(sigma, sigmax, sigmin): + return 1 + ((sigma - sigmax) * (0 - 1)) / (sigmin - sigmax) + +def center_latent_mean_values(latent, per_channel, mult): + for b in range(len(latent)): + if per_channel: + for c in range(len(latent[b])): + latent[b][c] -= latent[b][c].mean() * mult + else: + latent[b] -= latent[b].mean() * mult + return latent + +def get_denoised_ranges(latent, measure="hard", top_k=0.25): + chans = [] + for x in range(len(latent)): + max_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=True).values + min_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=False).values + max_val = torch.mean(max_values).item() + min_val = abs(torch.mean(min_values).item()) if measure == "soft" else torch.mean(torch.abs(min_values)).item() + denoised_range = (max_val + min_val) / 2 + chans.append(denoised_range**2 if measure == "hard_squared" else denoised_range) + return chans + +def get_sigmin_sigmax(model): + model_sampling = model.model.model_sampling + sigmin = model_sampling.sigma(model_sampling.timestep(model_sampling.sigma_min)) + sigmax = model_sampling.sigma(model_sampling.timestep(model_sampling.sigma_max)) + return sigmin, sigmax + +def gaussian_similarity(x, y, sigma=1.0): + diff = (x - y) ** 2 + return torch.exp(-diff / (2 * sigma ** 2)) + +def check_skip(sigma, high_sigma_threshold, low_sigma_threshold): + return sigma > high_sigma_threshold or sigma < low_sigma_threshold + +def max_abs(tensors): + shape = tensors.shape + tensors = tensors.reshape(shape[0], -1) + tensors_abs = torch.abs(tensors) + max_abs_idx = torch.argmax(tensors_abs, dim=0) + result = tensors[max_abs_idx, torch.arange(tensors.shape[1])] + return result.reshape(shape[1:]) + +def gaussian_kernel(size: int, sigma: float): + x = torch.arange(size) - size // 2 + gauss = torch.exp(-x**2 / (2 * sigma**2)) + kernel = gauss / gauss.sum() + return kernel.view(1, size) * kernel.view(size, 1) + +def blur_tensor(tensor, kernel_size = 9, sigma = 2.0): + tensor = tensor.unsqueeze(0) + C = tensor.size(1) + kernel = gaussian_kernel(kernel_size, sigma) + kernel = kernel.expand(C, 1, kernel_size, kernel_size).to(tensor.device).to(dtype=tensor.dtype, device=tensor.device) + padding = kernel_size // 2 + tensor = F.pad(tensor, (padding, padding, padding, padding), mode='reflect') + blurred_tensor = F.conv2d(tensor, kernel, groups=C) + return blurred_tensor.squeeze(0) + +def smallest_distances(tensors): + if all(torch.equal(tensors[0], tensor) for tensor in tensors[1:]): + return tensors[0] + set_device = tensors.device + min_val = torch.full(tensors[0].shape, float("inf")).to(set_device) + result = torch.zeros_like(tensors[0]) + for idx1, t1 in enumerate(tensors): + temp_diffs = torch.zeros_like(tensors[0]) + for idx2, t2 in enumerate(tensors): + if idx1 != idx2: + temp_diffs += torch.abs(torch.sub(t1, t2)) + min_val = torch.minimum(min_val, temp_diffs) + mask = torch.eq(min_val,temp_diffs) + result[mask] = t1[mask] + return result + +def 
rescale(tensor, multiplier=2): + batch, seq_length, features = tensor.shape + H = W = int(seq_length**0.5) + tensor_reshaped = tensor.view(batch, features, H, W) + new_H = new_W = int(H * multiplier) + resized_tensor = F.interpolate(tensor_reshaped, size=(new_H, new_W), mode='bilinear', align_corners=False) + return resized_tensor.view(batch, new_H * new_W, features) + +# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475 +def slerp(high, low, val): + dims = low.shape + + #flatten to batches + low = low.reshape(dims[0], -1) + high = high.reshape(dims[0], -1) + + low_norm = low/torch.norm(low, dim=1, keepdim=True) + high_norm = high/torch.norm(high, dim=1, keepdim=True) + + # in case we divide by zero + low_norm[low_norm != low_norm] = 0.0 + high_norm[high_norm != high_norm] = 0.0 + + omega = torch.acos((low_norm*high_norm).sum(1)) + so = torch.sin(omega) + res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high + return res.reshape(dims) + +normalize_tensor = lambda x: x / x.norm() + +def random_swap(tensors, proportion=1): + num_tensors = tensors.shape[0] + if num_tensors < 2: return tensors[0],0 + tensor_size = tensors[0].numel() + if tensor_size < 100: return tensors[0],0 + + true_count = int(tensor_size * proportion) + mask = torch.cat((torch.ones(true_count, dtype=torch.bool, device=tensors[0].device), + torch.zeros(tensor_size - true_count, dtype=torch.bool, device=tensors[0].device))) + mask = mask[torch.randperm(tensor_size)].reshape(tensors[0].shape) + if num_tensors == 2 and proportion < 1: + index_tensor = torch.ones_like(tensors[0], dtype=torch.int64, device=tensors[0].device) + else: + index_tensor = torch.randint(1 if proportion < 1 else 0, num_tensors, tensors[0].shape, device=tensors[0].device) + for i, t in enumerate(tensors): + if i == 0: continue + merge_mask = index_tensor == i & mask + tensors[0][merge_mask] = t[merge_mask] + return tensors[0] + +def multi_tensor_check_mix(tensors): + if tensors[0].numel() < 2 or len(tensors) < 2: + return tensors[0] + ref_tensor_shape = tensors[0].shape + sequence_tensor = torch.arange(tensors[0].numel(), device=tensors[0].device) % len(tensors) + reshaped_sequence = sequence_tensor.view(ref_tensor_shape) + for i in range(len(tensors)): + if i == 0: continue + mask = reshaped_sequence == i + tensors[0][mask] = tensors[i][mask] + return tensors[0] + +def sspow(input_tensor, p=2): + return input_tensor.abs().pow(p) * input_tensor.sign() + +def sspown(input_tensor, p=2): + abs_t = input_tensor.abs() + abs_t = (abs_t - abs_t.min()) / (abs_t.max() - abs_t.min()) + return abs_t.pow(p) * input_tensor.sign() + +def gradient_merge(tensor1, tensor2, start_value=0, dim=0): + if torch.numel(tensor1) <= 1: return tensor1 + if dim >= tensor1.dim(): dim = 0 + size = tensor1.size(dim) + alpha = torch.linspace(start_value, 1-start_value, steps=size, device=tensor1.device).view([-1 if i == dim else 1 for i in range(tensor1.dim())]) + return tensor1 * alpha + tensor2 * (1 - alpha) + +def save_tensor(input_tensor,name): + if "rndnum" in name: + rndnum = str(random.randint(100000,999999)) + name = name.replace("rndnum", rndnum) + output_directory = os.path.join(current_dir, 'saved_tensors') + os.makedirs(output_directory, exist_ok=True) + output_file_path = os.path.join(output_directory, f"{name}.pt") + torch.save(input_tensor, output_file_path) + return input_tensor + +def print_and_return(input_tensor, *args): + for what_to_print in args: + print(" 
",what_to_print) + return input_tensor + +# Experimental testings +def normal_attention(q, k, v, mask=None): + attention_scores = torch.matmul(q, k.transpose(-2, -1)) + d_k = k.size(-1) + attention_scores = attention_scores / torch.sqrt(torch.tensor(d_k, dtype=torch.float32)) + if mask is not None: + attention_scores = attention_scores.masked_fill(mask == 0, float('-inf')) + attention_weights = F.softmax(attention_scores, dim=-1) + output = torch.matmul(attention_weights, v) + return output + +def split_heads(x, n_heads): + batch_size, seq_length, hidden_dim = x.size() + head_dim = hidden_dim // n_heads + x = x.view(batch_size, seq_length, n_heads, head_dim) + return x.permute(0, 2, 1, 3) + +def combine_heads(x, n_heads): + batch_size, n_heads, seq_length, head_dim = x.size() + hidden_dim = n_heads * head_dim + x = x.permute(0, 2, 1, 3).contiguous() + return x.view(batch_size, seq_length, hidden_dim) + +def sparsemax(logits): + logits_sorted, _ = torch.sort(logits, descending=True, dim=-1) + cumulative_sum = torch.cumsum(logits_sorted, dim=-1) - 1 + rho = (logits_sorted > cumulative_sum / (torch.arange(logits.size(-1)) + 1).to(logits.device)).float() + tau = (cumulative_sum / rho.sum(dim=-1, keepdim=True)).gather(dim=-1, index=rho.sum(dim=-1, keepdim=True).long() - 1) + return torch.max(torch.zeros_like(logits), logits - tau) + +def attnfunc_custom(q, k, v, n_heads, eval_string = ""): + q = split_heads(q, n_heads) + k = split_heads(k, n_heads) + v = split_heads(v, n_heads) + + d_k = q.size(-1) + + scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) + + if eval_string == "": + attn_weights = F.softmax(scores, dim=-1) + else: + attn_weights = eval(eval_string) + + output = torch.matmul(attn_weights, v) + output = combine_heads(output, n_heads) + return output + +def min_max_norm(t): + return (t - t.min()) / (t.max() - t.min()) + +class attention_modifier(): + def __init__(self, self_attn_mod_eval, conds = None): + self.self_attn_mod_eval = self_attn_mod_eval + self.conds = conds + + def modified_attention(self, q, k, v, extra_options, mask=None): + + """extra_options contains: {'cond_or_uncond': [1, 0], 'sigmas': tensor([14.6146], device='cuda:0'), + 'original_shape': [2, 4, 128, 128], 'transformer_index': 4, 'block': ('middle', 0), + 'block_index': 3, 'n_heads': 20, 'dim_head': 64, 'attn_precision': None}""" + + if "attnbc" in self.self_attn_mod_eval: + attnbc = attention_basic(q, k, v, extra_options['n_heads'], mask) + if "normattn" in self.self_attn_mod_eval: + normattn = normal_attention(q, k, v, mask) + if "attnxf" in self.self_attn_mod_eval: + attnxf = attention_xformers(q, k, v, extra_options['n_heads'], mask) + if "attnpy" in self.self_attn_mod_eval: + attnpy = attention_pytorch(q, k, v, extra_options['n_heads'], mask) + if "attnsp" in self.self_attn_mod_eval: + attnsp = attention_split(q, k, v, extra_options['n_heads'], mask) + if "attnsq" in self.self_attn_mod_eval: + attnsq = attention_sub_quad(q, k, v, extra_options['n_heads'], mask) + if "attnopt" in self.self_attn_mod_eval: + attnopt = attnfunc(q, k, v, extra_options['n_heads'], mask) + n_heads = extra_options['n_heads'] + if self.conds is not None: + cond_pos_l = self.conds[0][..., :768].cuda() + cond_neg_l = self.conds[1][..., :768].cuda() + if self.conds[0].shape[-1] > 768: + cond_pos_g = self.conds[0][..., 768:2048].cuda() + cond_neg_g = self.conds[1][..., 768:2048].cuda() + return eval(self.self_attn_mod_eval) + +def experimental_functions(cond_input, method, exp_value, exp_normalize, pcp, psi, sigma, sigmax, 
attention_modifiers_input, args, model_options_copy, eval_string = ""): + """ + There may or may not be an actual reasoning behind each of these methods. + Some like the sine value have interesting properties. Enabled for both cond and uncond preds it somehow make them stronger. + Note that there is a "normalize" toggle and it may change greatly the end result since some operation will totaly butcher the values. + "theDaRkNeSs" for example without normalizing seems to darken if used for cond/uncond (not with the cond as the uncond or something). + Maybe just with the positive. I don't remember. I leave it for now if you want to play around. + + The eval_string can be used to create the uncond replacement. + I made it so it's split by semicolons and only the last split is the value in used. + What is before is added in an array named "v". + pcp is previous cond_pred + psi is previous sigma + args is the CFG function input arguments with the added cond/unconds (like the actual activation conditionings) named respectively "cond_pos" and "cond_neg" + + So if you write: + + pcp if sigma < 7 else -pcp; + print("it works too just don't use the output I guess"); + v[0] if sigma < 14 else torch.zeros_like(cond); + v[-1]*2 + + Well the first line becomes v[0], second v[1] etc. + The last one becomes the result. + Note that it's just an example, I don't see much interest in that one. + + Using comfy.samplers.calc_cond_batch(args["model"], [args["cond_pos"], None], args["input"], args["timestep"], args["model_options"])[0] + can work too. + + This whole mess has for initial goal to attempt to find the best way (or have some bruteforcing fun) to replace the uncond pred for as much as possible. + Or simply to try things around :) + """ + if method == "cond_pred": + return cond_input + default_device = cond_input.device + # print() + # print(get_entropy(cond)) + cond = cond_input.clone() + cond_norm = cond.norm() + if method == "amplify": + mask = torch.abs(cond) >= 1 + cond_copy = cond.clone() + cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign() + cond[mask] = torch.pow(torch.abs(cond_copy[mask]), exp_value) * cond[mask].sign() + elif method == "root": + cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign() + elif method == "power": + cond = torch.pow(torch.abs(cond), exp_value) * cond.sign() + elif method == "erf": + cond = torch.erf(cond) + elif method == "exp_erf": + cond = torch.pow(torch.erf(cond), exp_value) + elif method == "root_erf": + cond = torch.erf(cond) + cond = torch.pow(torch.abs(cond), 1 / exp_value ) * cond.sign() + elif method == "erf_amplify": + cond = torch.erf(cond) + mask = torch.abs(cond) >= 1 + cond_copy = cond.clone() + cond = torch.pow(torch.abs(cond), 1 / exp_value ) * cond.sign() + cond[mask] = torch.pow(torch.abs(cond_copy[mask]), exp_value) * cond[mask].sign() + elif method == "sine": + cond = torch.sin(torch.abs(cond)) * cond.sign() + elif method == "sine_exp": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond), exp_value) * cond.sign() + elif method == "sine_exp_diff": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond_input), exp_value) * cond.sign() - cond + elif method == "sine_exp_diff_to_sine": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond), exp_value) * cond.sign() - cond + elif method == "sine_root": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign() + elif method == 
"sine_root_diff": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond_input), 1 / exp_value) * cond.sign() - cond + elif method == "sine_root_diff_to_sine": + cond = torch.sin(torch.abs(cond)) * cond.sign() + cond = torch.pow(torch.abs(cond), 1 / exp_value) * cond.sign() - cond + elif method == "theDaRkNeSs": + cond = torch.sin(cond) + cond = torch.pow(torch.abs(cond), 1 / exp_value) * cond.sign() - cond + elif method == "cosine": + cond = torch.cos(torch.abs(cond)) * cond.sign() + elif method == "sign": + cond = cond.sign() + elif method == "zero": + cond = torch.zeros_like(cond) + elif method in ["attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond","subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"]: + cond_to_use = args["cond_pos"] if method in ["attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_cond"] else args["cond_neg"] + tmp_model_options = deepcopy(model_options_copy) + for atm in attention_modifiers_input: + if sigma <= atm['sigma_start'] and sigma > atm['sigma_end']: + block_layers = {"input": atm['unet_block_id_input'], "middle": atm['unet_block_id_middle'], "output": atm['unet_block_id_output']} + for unet_block in block_layers: + for unet_block_id in block_layers[unet_block].split(","): + if unet_block_id != "": + unet_block_id = int(unet_block_id) + tmp_model_options = set_model_options_patch_replace(tmp_model_options, attention_modifier(atm['self_attn_mod_eval'], [args["cond_pos"][0]["cross_attn"], args["cond_neg"][0]["cross_attn"]]if "cond" in atm['self_attn_mod_eval'] else None).modified_attention, atm['unet_attn'], unet_block, unet_block_id) + + cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0] + if method in ["subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"]: + cond = cond_input + (cond_input - cond) * exp_value + + elif method == "previous_average": + if sigma > (sigmax - 1): + cond = torch.zeros_like(cond) + else: + cond = (pcp / psi * sigma + cond) / 2 + elif method == "eval": + if "condmix" in eval_string: + def condmix(args, mult=2): + cond_pos_tmp = deepcopy(args["cond_pos"]) + cond_pos_tmp[0]["cross_attn"] += (args["cond_pos"][0]["cross_attn"] - args["cond_neg"][0]["cross_attn"]*-1) * mult + return cond_pos_tmp + v = [] + evals_strings = eval_string.split(";") + if len(evals_strings) > 1: + for i in range(len(evals_strings[:-1])): + v.append(eval(evals_strings[i])) + cond = eval(evals_strings[-1]) + if exp_normalize and torch.all(cond != 0): + cond = cond * cond_norm / cond.norm() + # print(get_entropy(cond)) + return cond.to(device=default_device) + +class advancedDynamicCFG: + def __init__(self): + self.last_cfg_ht_one = 8 + self.previous_cond_pred = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + + "automatic_cfg" : (["None", "soft", "hard", "hard_squared", "range"], {"default": "hard"},), + + "skip_uncond" : ("BOOLEAN", {"default": True}), + "fake_uncond_start" : ("BOOLEAN", {"default": False}), + "uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + + "lerp_uncond" : ("BOOLEAN", {"default": False}), + "lerp_uncond_strength": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.1, "round": 0.1}), 
+ "lerp_uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "lerp_uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + + "subtract_latent_mean" : ("BOOLEAN", {"default": False}), + "subtract_latent_mean_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "subtract_latent_mean_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + + "latent_intensity_rescale" : ("BOOLEAN", {"default": False}), + "latent_intensity_rescale_method" : (["soft","hard","range"], {"default": "hard"},), + "latent_intensity_rescale_cfg": ("FLOAT", {"default": 8, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.1}), + "latent_intensity_rescale_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "latent_intensity_rescale_sigma_end": ("FLOAT", {"default": 3, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + + "cond_exp": ("BOOLEAN", {"default": False}), + "cond_exp_normalize": ("BOOLEAN", {"default": False}), + "cond_exp_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "cond_exp_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "cond_exp_method": (["amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff", "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "previous_average", "eval", + "attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond", + "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"],), + "cond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 100, "step": 0.1, "round": 0.01}), + + "uncond_exp": ("BOOLEAN", {"default": False}), + "uncond_exp_normalize": ("BOOLEAN", {"default": False}), + "uncond_exp_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "uncond_exp_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "uncond_exp_method": (["amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff", "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "previous_average", "eval", + "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"],), + "uncond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 100, "step": 0.1, "round": 0.01}), + + "fake_uncond_exp": ("BOOLEAN", {"default": False}), + "fake_uncond_exp_normalize": ("BOOLEAN", {"default": False}), + "fake_uncond_exp_method" : (["cond_pred", "previous_average", + "amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff", + "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "eval", + "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond", + "attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond"],), + "fake_uncond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 1000, "step": 0.1, "round": 0.01}), + "fake_uncond_multiplier": ("INT", {"default": 1, "min": -1, "max": 
1, "step": 1}), + "fake_uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "fake_uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "auto_cfg_topk": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 0.5, "step": 0.05, "round": 0.01}), + "auto_cfg_ref": ("FLOAT", {"default": 8, "min": 0.0, "max": 100, "step": 0.5, "round": 0.01}), + "attention_modifiers_global_enabled": ("BOOLEAN", {"default": False}), + "disable_cond": ("BOOLEAN", {"default": False}), + "disable_cond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "disable_cond_sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "save_as_preset": ("BOOLEAN", {"default": False}), + "preset_name": ("STRING", {"multiline": False}), + }, + "optional":{ + "eval_string_cond": ("STRING", {"multiline": True}), + "eval_string_uncond": ("STRING", {"multiline": True}), + "eval_string_fake": ("STRING", {"multiline": True}), + "args_filter": ("STRING", {"multiline": True, "forceInput": True}), + "attention_modifiers_positive": ("ATTNMOD", {"forceInput": True}), + "attention_modifiers_negative": ("ATTNMOD", {"forceInput": True}), + "attention_modifiers_fake_negative": ("ATTNMOD", {"forceInput": True}), + "attention_modifiers_global": ("ATTNMOD", {"forceInput": True}), + } + } + RETURN_TYPES = ("MODEL","STRING",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG" + + def patch(self, model, automatic_cfg = "None", + skip_uncond = False, fake_uncond_start = False, uncond_sigma_start = 1000, uncond_sigma_end = 0, + lerp_uncond = False, lerp_uncond_strength = 1, lerp_uncond_sigma_start = 1000, lerp_uncond_sigma_end = 1, + subtract_latent_mean = False, subtract_latent_mean_sigma_start = 1000, subtract_latent_mean_sigma_end = 1, + latent_intensity_rescale = False, latent_intensity_rescale_sigma_start = 1000, latent_intensity_rescale_sigma_end = 1, + cond_exp = False, cond_exp_sigma_start = 1000, cond_exp_sigma_end = 1000, cond_exp_method = "amplify", cond_exp_value = 2, cond_exp_normalize = False, + uncond_exp = False, uncond_exp_sigma_start = 1000, uncond_exp_sigma_end = 1000, uncond_exp_method = "amplify", uncond_exp_value = 2, uncond_exp_normalize = False, + fake_uncond_exp = False, fake_uncond_exp_method = "amplify", fake_uncond_exp_value = 2, fake_uncond_exp_normalize = False, fake_uncond_multiplier = 1, fake_uncond_sigma_start = 1000, fake_uncond_sigma_end = 1, + latent_intensity_rescale_cfg = 8, latent_intensity_rescale_method = "hard", + ignore_pre_cfg_func = False, args_filter = "", auto_cfg_topk = 0.25, auto_cfg_ref = 8, + eval_string_cond = "", eval_string_uncond = "", eval_string_fake = "", + attention_modifiers_global_enabled = False, + attention_modifiers_positive = [], attention_modifiers_negative = [], attention_modifiers_fake_negative = [], attention_modifiers_global = [], + disable_cond=False, disable_cond_sigma_start=1000,disable_cond_sigma_end=1000, save_as_preset = False, preset_name = "", **kwargs + ): + + # support_function() + model_options_copy = deepcopy(model.model_options) + monkey_patching_comfy_sampling_function() + if args_filter != "": + args_filter = args_filter.split(",") + else: + args_filter = [k for k, v in locals().items()] + not_in_filter = ['self','model','args','args_filter','save_as_preset','preset_name','model_options_copy'] + if fake_uncond_exp_method != "eval": + not_in_filter.append("eval_string") 
+ + if save_as_preset and preset_name != "": + preset_parameters = {key: value for key, value in locals().items() if key not in not_in_filter} + with open(os.path.join(json_preset_path, preset_name+".json"), 'w', encoding='utf-8') as f: + json.dump(preset_parameters, f) + print(f"Preset saved with the name: {Fore.GREEN}{preset_name}{Fore.RESET}") + print(f"{Fore.RED}Don't forget to turn the save toggle OFF to not overwrite!{Fore.RESET}") + + args_str = '\n'.join(f'{k}: {v}' for k, v in locals().items() if k not in not_in_filter and k in args_filter) + + sigmin, sigmax = get_sigmin_sigmax(model) + + lerp_start, lerp_end = lerp_uncond_sigma_start, lerp_uncond_sigma_end + subtract_start, subtract_end = subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end + rescale_start, rescale_end = latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end + print(f"Model maximum sigma: {sigmax} / Model minimum sigma: {sigmin}") + m = model.clone() + + if skip_uncond or disable_cond: + # set model_options sampler_pre_cfg_automatic_cfg_function + m.model_options["sampler_pre_cfg_automatic_cfg_function"] = make_sampler_pre_cfg_automatic_cfg_function(uncond_sigma_end if skip_uncond else 0, uncond_sigma_start if skip_uncond else 100000,\ + disable_cond_sigma_start if disable_cond else 100000, disable_cond_sigma_end if disable_cond else 100000) + print(f"Sampling function patched. Uncond enabled from {round(uncond_sigma_start,2)} to {round(uncond_sigma_end,2)}") + elif not ignore_pre_cfg_func: + m.model_options.pop("sampler_pre_cfg_automatic_cfg_function", None) + uncond_sigma_start, uncond_sigma_end = 1000000, 0 + + top_k = auto_cfg_topk + previous_cond_pred = None + previous_sigma = None + def automatic_cfg_function(args): + nonlocal previous_sigma + cond_scale = args["cond_scale"] + input_x = args["input"] + cond_pred = args["cond_denoised"] + uncond_pred = args["uncond_denoised"] + sigma = args["sigma"][0] + model_options = args["model_options"] + if self.previous_cond_pred is None: + self.previous_cond_pred = cond_pred.clone().detach().to(device=cond_pred.device) + if previous_sigma is None: + previous_sigma = sigma.item() + reference_cfg = auto_cfg_ref if auto_cfg_ref > 0 else cond_scale + + def fake_uncond_step(): + return fake_uncond_start and skip_uncond and (sigma > uncond_sigma_start or sigma < uncond_sigma_end) and sigma <= fake_uncond_sigma_start and sigma >= fake_uncond_sigma_end + + if fake_uncond_step(): + uncond_pred = cond_pred.clone().detach().to(device=cond_pred.device) * fake_uncond_multiplier + + if cond_exp and sigma <= cond_exp_sigma_start and sigma >= cond_exp_sigma_end: + cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond) + if uncond_exp and sigma <= uncond_exp_sigma_start and sigma >= uncond_exp_sigma_end and not fake_uncond_step(): + uncond_pred = experimental_functions(uncond_pred, uncond_exp_method, uncond_exp_value, uncond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_negative, args, model_options_copy, eval_string_uncond) + if fake_uncond_step() and fake_uncond_exp: + uncond_pred = experimental_functions(uncond_pred, fake_uncond_exp_method, fake_uncond_exp_value, fake_uncond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_fake_negative, args, model_options_copy, 
eval_string_fake) + self.previous_cond_pred = cond_pred.clone().detach().to(device=cond_pred.device) + + if sigma >= sigmax or cond_scale > 1: + self.last_cfg_ht_one = cond_scale + target_intensity = self.last_cfg_ht_one / 10 + + if ((check_skip(sigma, uncond_sigma_start, uncond_sigma_end) and skip_uncond) and not fake_uncond_step()) or cond_scale == 1: + return input_x - cond_pred + + if lerp_uncond and not check_skip(sigma, lerp_start, lerp_end) and lerp_uncond_strength != 1: + uncond_pred_norm = uncond_pred.norm() + uncond_pred = torch.lerp(cond_pred, uncond_pred, lerp_uncond_strength) + uncond_pred = uncond_pred * uncond_pred_norm / uncond_pred.norm() + cond = input_x - cond_pred + uncond = input_x - uncond_pred + + if automatic_cfg == "None": + return uncond + cond_scale * (cond - uncond) + + denoised_tmp = input_x - (uncond + reference_cfg * (cond - uncond)) + + for b in range(len(denoised_tmp)): + denoised_ranges = get_denoised_ranges(denoised_tmp[b], automatic_cfg, top_k) + for c in range(len(denoised_tmp[b])): + fixeds_scale = reference_cfg * target_intensity / denoised_ranges[c] + denoised_tmp[b][c] = uncond[b][c] + fixeds_scale * (cond[b][c] - uncond[b][c]) + + return denoised_tmp + + def center_mean_latent_post_cfg(args): + denoised = args["denoised"] + sigma = args["sigma"][0] + if check_skip(sigma, subtract_start, subtract_end): + return denoised + denoised = center_latent_mean_values(denoised, False, 1) + return denoised + + def rescale_post_cfg(args): + denoised = args["denoised"] + sigma = args["sigma"][0] + + if check_skip(sigma, rescale_start, rescale_end): + return denoised + target_intensity = latent_intensity_rescale_cfg / 10 + for b in range(len(denoised)): + denoised_ranges = get_denoised_ranges(denoised[b], latent_intensity_rescale_method) + for c in range(len(denoised[b])): + scale_correction = target_intensity / denoised_ranges[c] + denoised[b][c] = denoised[b][c] * scale_correction + return denoised + + tmp_model_options = deepcopy(m.model_options) + if attention_modifiers_global_enabled: + # print(f"{Fore.GREEN}Sigma timings are ignored for global modifiers.{Fore.RESET}") + for atm in attention_modifiers_global: + block_layers = {"input": atm['unet_block_id_input'], "middle": atm['unet_block_id_middle'], "output": atm['unet_block_id_output']} + for unet_block in block_layers: + for unet_block_id in block_layers[unet_block].split(","): + if unet_block_id != "": + unet_block_id = int(unet_block_id) + tmp_model_options = set_model_options_patch_replace(tmp_model_options, attention_modifier(atm['self_attn_mod_eval']).modified_attention, atm['unet_attn'], unet_block, unet_block_id) + m.model_options = tmp_model_options + + if not ignore_pre_cfg_func: + m.set_model_sampler_cfg_function(automatic_cfg_function, disable_cfg1_optimization = False) + if subtract_latent_mean: + m.set_model_sampler_post_cfg_function(center_mean_latent_post_cfg) + if latent_intensity_rescale: + m.set_model_sampler_post_cfg_function(rescale_post_cfg) + return (m, args_str, ) + +class attentionModifierParametersNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "self_attn_mod_eval": ("STRING", {"multiline": True }, {"default": ""}), + "unet_block_id_input": ("STRING", {"multiline": False}, {"default": ""}), + "unet_block_id_middle": ("STRING", {"multiline": False}, {"default": 
""}), + "unet_block_id_output": ("STRING", {"multiline": False}, {"default": ""}), + "unet_attn": (["attn1","attn2","both"],), + }, + "optional":{ + "join_parameters": ("ATTNMOD", {"forceInput": True}), + }} + + RETURN_TYPES = ("ATTNMOD","STRING",) + RETURN_NAMES = ("Attention modifier", "Parameters as string") + FUNCTION = "exec" + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + def exec(self, join_parameters=None, **kwargs): + info_string = "\n".join([f"{k}: {v}" for k,v in kwargs.items() if v != ""]) + if kwargs['unet_attn'] == "both": + copy_kwargs = kwargs.copy() + kwargs['unet_attn'] = "attn1" + copy_kwargs['unet_attn'] = "attn2" + out_modifiers = [kwargs, copy_kwargs] + else: + out_modifiers = [kwargs] + return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, ) + +class attentionModifierBruteforceParametersNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "self_attn_mod_eval": ("STRING", {"multiline": True , "default": ""}), + "unet_block_id_input": ("STRING", {"multiline": False, "default": "4,5,7,8"}), + "unet_block_id_middle": ("STRING", {"multiline": False, "default": "0"}), + "unet_block_id_output": ("STRING", {"multiline": False, "default": "0,1,2,3,4,5"}), + "unet_attn": (["attn1","attn2","both"],), + }, + "optional":{ + "join_parameters": ("ATTNMOD", {"forceInput": True}), + }} + + RETURN_TYPES = ("ATTNMOD","STRING",) + RETURN_NAMES = ("Attention modifier", "Parameters as string") + FUNCTION = "exec" + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + + def create_sequence_parameters(self, input_str, middle_str, output_str): + input_values = input_str.split(",") if input_str else [] + middle_values = middle_str.split(",") if middle_str else [] + output_values = output_str.split(",") if output_str else [] + result = [] + result.extend([{"unet_block_id_input": val, "unet_block_id_middle": "", "unet_block_id_output": ""} for val in input_values]) + result.extend([{"unet_block_id_input": "", "unet_block_id_middle": val, "unet_block_id_output": ""} for val in middle_values]) + result.extend([{"unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": val} for val in output_values]) + return result + + def exec(self, seed, join_parameters=None, **kwargs): + sequence_parameters = self.create_sequence_parameters(kwargs['unet_block_id_input'],kwargs['unet_block_id_middle'],kwargs['unet_block_id_output']) + lenseq = len(sequence_parameters) + current_index = seed % lenseq + current_sequence = sequence_parameters[current_index] + kwargs["unet_block_id_input"] = current_sequence["unet_block_id_input"] + kwargs["unet_block_id_middle"] = current_sequence["unet_block_id_middle"] + kwargs["unet_block_id_output"] = current_sequence["unet_block_id_output"] + if current_sequence["unet_block_id_input"] != "": + current_block_string = f"unet_block_id_input: {current_sequence['unet_block_id_input']}" + elif current_sequence["unet_block_id_middle"] != "": + current_block_string = f"unet_block_id_middle: {current_sequence['unet_block_id_middle']}" + elif current_sequence["unet_block_id_output"] != "": + current_block_string = f"unet_block_id_output: {current_sequence['unet_block_id_output']}" + 
info_string = f"Progress: {current_index+1}/{lenseq}\n{kwargs['self_attn_mod_eval']}\n{kwargs['unet_attn']} {current_block_string}" + if kwargs['unet_attn'] == "both": + copy_kwargs = kwargs.copy() + kwargs['unet_attn'] = "attn1" + copy_kwargs['unet_attn'] = "attn2" + out_modifiers = [kwargs, copy_kwargs] + else: + out_modifiers = [kwargs] + return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, ) + +class attentionModifierConcatNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "parameters_1": ("ATTNMOD", {"forceInput": True}), + "parameters_2": ("ATTNMOD", {"forceInput": True}), + }} + + RETURN_TYPES = ("ATTNMOD",) + FUNCTION = "exec" + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + def exec(self, parameters_1, parameters_2): + output_parms = parameters_1 + parameters_2 + return (output_parms, ) + +class simpleDynamicCFG: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "hard_mode" : ("BOOLEAN", {"default": True}), + "boost" : ("BOOLEAN", {"default": True}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/presets" + + def patch(self, model, hard_mode, boost): + advcfg = advancedDynamicCFG() + m = advcfg.patch(model, + skip_uncond = boost, + uncond_sigma_start = 1000, uncond_sigma_end = 1, + automatic_cfg = "hard" if hard_mode else "soft" + )[0] + return (m, ) + +class presetLoader: + @classmethod + def INPUT_TYPES(s): + presets_files = [pj.replace(".json","") for pj in os.listdir(json_preset_path) if ".json" in pj and pj not in ["Experimental_temperature.json","do_not_delete.json"]] + presets_files = sorted(presets_files, key=str.lower) + return {"required": { + "model": ("MODEL",), + "preset" : (presets_files, {"default": "Excellent_attention"}), + "uncond_sigma_end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "use_uncond_sigma_end_from_preset" : ("BOOLEAN", {"default": True}), + "automatic_cfg" : (["From preset","None", "soft", "hard", "hard_squared", "range"],), + }, + "optional":{ + "join_global_parameters": ("ATTNMOD", {"forceInput": True}), + }} + RETURN_TYPES = ("MODEL", "STRING", "STRING",) + RETURN_NAMES = ("Model", "Preset name", "Parameters as string",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG" + + def patch(self, model, preset, uncond_sigma_end, use_uncond_sigma_end_from_preset, automatic_cfg, join_global_parameters=None): + with open(os.path.join(json_preset_path, preset+".json"), 'r', encoding='utf-8') as f: + preset_args = json.load(f) + if not use_uncond_sigma_end_from_preset: + preset_args["uncond_sigma_end"] = uncond_sigma_end + preset_args["fake_uncond_sigma_end"] = uncond_sigma_end + preset_args["fake_uncond_exp_sigma_end"] = uncond_sigma_end + preset_args["uncond_exp_sigma_end"] = uncond_sigma_end + + if join_global_parameters is not None: + preset_args["attention_modifiers_global"] = preset_args["attention_modifiers_global"] + join_global_parameters + preset_args["attention_modifiers_global_enabled"] = True + + if automatic_cfg != "From preset": + preset_args["automatic_cfg"] = automatic_cfg + + advcfg = advancedDynamicCFG() + m = advcfg.patch(model, **preset_args)[0] + info_string = ",\n".join([f"\"{k}\": {v}" for k,v in preset_args.items() if v != ""]) + print(f"Preset {Fore.GREEN}{preset}{Fore.RESET} loaded successfully!") + return (m, preset, info_string,) + +class simpleDynamicCFGlerpUncond: + @classmethod + def 
INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "boost" : ("BOOLEAN", {"default": True}), + "negative_strength": ("FLOAT", {"default": 1, "min": 0.0, "max": 5.0, "step": 0.1, "round": 0.1}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/presets" + + def patch(self, model, boost, negative_strength): + advcfg = advancedDynamicCFG() + m = advcfg.patch(model=model, + automatic_cfg="hard", skip_uncond=boost, + uncond_sigma_start = 15, uncond_sigma_end = 1, + lerp_uncond=negative_strength != 1, lerp_uncond_strength=negative_strength, + lerp_uncond_sigma_start = 15, lerp_uncond_sigma_end = 1 + )[0] + return (m, ) + +class postCFGrescaleOnly: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "subtract_latent_mean" : ("BOOLEAN", {"default": True}), + "subtract_latent_mean_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}), + "subtract_latent_mean_sigma_end": ("FLOAT", {"default": 7.5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}), + "latent_intensity_rescale" : ("BOOLEAN", {"default": True}), + "latent_intensity_rescale_method" : (["soft","hard","range"], {"default": "hard"},), + "latent_intensity_rescale_cfg" : ("FLOAT", {"default": 8, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.1}), + "latent_intensity_rescale_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}), + "latent_intensity_rescale_sigma_end": ("FLOAT", {"default": 5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/utils" + + def patch(self, model, + subtract_latent_mean, subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end, + latent_intensity_rescale, latent_intensity_rescale_method, latent_intensity_rescale_cfg, latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end + ): + advcfg = advancedDynamicCFG() + m = advcfg.patch(model=model, + subtract_latent_mean = subtract_latent_mean, + subtract_latent_mean_sigma_start = subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end = subtract_latent_mean_sigma_end, + latent_intensity_rescale = latent_intensity_rescale, latent_intensity_rescale_cfg = latent_intensity_rescale_cfg, latent_intensity_rescale_method = latent_intensity_rescale_method, + latent_intensity_rescale_sigma_start = latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end = latent_intensity_rescale_sigma_end, + ignore_pre_cfg_func = True + )[0] + return (m, ) + +class simpleDynamicCFGHighSpeed: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/presets" + + def patch(self, model): + advcfg = advancedDynamicCFG() + m = advcfg.patch(model=model, automatic_cfg = "hard", + skip_uncond = True, uncond_sigma_start = 7.5, uncond_sigma_end = 1)[0] + return (m, ) + +class simpleDynamicCFGwarpDrive: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "uncond_sigma_start": ("FLOAT", {"default": 5.5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "fake_uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + 
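
# Aside: a standalone sketch of the latent intensity rescale that
# rescale_post_cfg / postCFGrescaleOnly above apply after CFG. This is a
# simplified stand-in (flattened channels, "hard" measure only), not the
# repo's exact code: each channel's intensity is estimated from the means of
# its top-k largest and smallest values, then scaled toward rescale_cfg / 10.
import torch

def channel_ranges(latent_chw, top_k=0.25):
    ranges = []
    for chan in latent_chw:  # chan: (H, W)
        flat = chan.flatten()
        k = max(1, int(flat.numel() * top_k))
        hi = torch.topk(flat, k, largest=True).values.mean()
        lo = torch.topk(flat, k, largest=False).values.abs().mean()
        ranges.append(((hi + lo) / 2).item())
    return ranges

def rescale_intensity(denoised_bchw, rescale_cfg=8.0):
    target = rescale_cfg / 10
    for b in range(denoised_bchw.shape[0]):
        for c, rng in enumerate(channel_ranges(denoised_bchw[b])):
            denoised_bchw[b, c] *= target / rng  # no zero guard, mirroring the node
    return denoised_bchw
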
FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/presets" + + def patch(self, model, uncond_sigma_start, uncond_sigma_end, fake_uncond_sigma_end): + advcfg = advancedDynamicCFG() + print(f" {Fore.CYAN}WARP DRIVE MODE ENGAGED!{Style.RESET_ALL}\n Settings suggestions:\n" + f" {Fore.GREEN}1/1/1: {Fore.YELLOW}Maaaxxxiiimum speeeeeed.{Style.RESET_ALL} {Fore.RED}Uncond disabled.{Style.RESET_ALL} {Fore.MAGENTA}Fasten your seatbelt!{Style.RESET_ALL}\n" + f" {Fore.GREEN}3/1/1: {Fore.YELLOW}Risky space-time continuum distortion.{Style.RESET_ALL} {Fore.MAGENTA}Awesome for prompts with a clear subject!{Style.RESET_ALL}\n" + f" {Fore.GREEN}5.5/1/1: {Fore.YELLOW}Frameshift Drive Autopilot: {Fore.GREEN}Engaged.{Style.RESET_ALL} {Fore.MAGENTA}Should work with anything but do it better and faster!{Style.RESET_ALL}") + + m = advcfg.patch(model=model, automatic_cfg = "hard", + skip_uncond = True, uncond_sigma_start = uncond_sigma_start, uncond_sigma_end = uncond_sigma_end, + fake_uncond_sigma_end = fake_uncond_sigma_end, fake_uncond_sigma_start = 1000, fake_uncond_start=True, + fake_uncond_exp=True,fake_uncond_exp_normalize=True,fake_uncond_exp_method="previous_average", + cond_exp = False, cond_exp_sigma_start = 9, cond_exp_sigma_end = uncond_sigma_start, cond_exp_method = "erf", cond_exp_normalize = True, + )[0] + return (m, ) + +class simpleDynamicCFGunpatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "unpatch" + + CATEGORY = "model_patches/Automatic_CFG/utils" + + def unpatch(self, model): + m = model.clone() + m.model_options.pop("sampler_pre_cfg_automatic_cfg_function", None) + return (m, ) + +class simpleDynamicCFGExcellentattentionPatch: + @classmethod + def INPUT_TYPES(s): + inputs = {"required": { + "model": ("MODEL",), + "Auto_CFG": ("BOOLEAN", {"default": True}), + "patch_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 1.0, "round": 0.01}), + "patch_cond": ("BOOLEAN", {"default": True}), + "patch_uncond": ("BOOLEAN", {"default": True}), + "light_patch": ("BOOLEAN", {"default": False}), + "mute_self_input_layer_8_cond": ("BOOLEAN", {"default": False}), + "mute_cross_input_layer_8_cond": ("BOOLEAN", {"default": False}), + "mute_self_input_layer_8_uncond": ("BOOLEAN", {"default": True}), + "mute_cross_input_layer_8_uncond": ("BOOLEAN", {"default": False}), + "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "bypass_layer_8_instead_of_mute": ("BOOLEAN", {"default": False}), + "save_as_preset": ("BOOLEAN", {"default": False}), + "preset_name": ("STRING", {"multiline": False}), + }, + "optional":{ + "attn_mod_for_positive_operation": ("ATTNMOD", {"forceInput": True}), + "attn_mod_for_negative_operation": ("ATTNMOD", {"forceInput": True}), + }, + } + if "dev_env.txt" in os.listdir(current_dir): + inputs['optional'].update({"attn_mod_for_global_operation": ("ATTNMOD", {"forceInput": True})}) + return inputs + + RETURN_TYPES = ("MODEL","STRING",) + RETURN_NAMES = ("Model", "Parameters as string",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG" + + def patch(self, model, Auto_CFG, patch_multiplier, patch_cond, patch_uncond, light_patch, + mute_self_input_layer_8_cond, mute_cross_input_layer_8_cond, + mute_self_input_layer_8_uncond, mute_cross_input_layer_8_uncond, + uncond_sigma_end,bypass_layer_8_instead_of_mute, save_as_preset, preset_name, + attn_mod_for_positive_operation = None, 
attn_mod_for_negative_operation = None, attn_mod_for_global_operation = None): + + parameters_as_string = "Excellent attention:\n" + "\n".join([f"{k}: {v}" for k, v in locals().items() if k not in ["self", "model"]]) + + with open(os.path.join(json_preset_path, "Excellent_attention.json"), 'r', encoding='utf-8') as f: + patch_parameters = json.load(f) + + attn_patch = {"sigma_start": 1000, "sigma_end": 0, + "self_attn_mod_eval": f"normalize_tensor(q+(q-attention_basic(attnbc, k, v, extra_options['n_heads'])))*attnbc.norm()*{patch_multiplier}", + "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"} + attn_patch_light = {"sigma_start": 1000, "sigma_end": 0, + "self_attn_mod_eval": f"q*{patch_multiplier}", + "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"} + + kill_self_input_8 = { + "sigma_start": 1000, + "sigma_end": 0, + "self_attn_mod_eval": "q" if bypass_layer_8_instead_of_mute else "torch.zeros_like(q)", + "unet_block_id_input": "8", + "unet_block_id_middle": "", + "unet_block_id_output": "", + "unet_attn": "attn1"} + + kill_cross_input_8 = kill_self_input_8.copy() + kill_cross_input_8['unet_attn'] = "attn2" + + attention_modifiers_positive = [] + attention_modifiers_fake_negative = [] + + if patch_cond: attention_modifiers_positive.append(attn_patch) if not light_patch else attention_modifiers_positive.append(attn_patch_light) + if mute_self_input_layer_8_cond: attention_modifiers_positive.append(kill_self_input_8) + if mute_cross_input_layer_8_cond: attention_modifiers_positive.append(kill_cross_input_8) + + if patch_uncond: attention_modifiers_fake_negative.append(attn_patch) if not light_patch else attention_modifiers_fake_negative.append(attn_patch_light) + if mute_self_input_layer_8_uncond: attention_modifiers_fake_negative.append(kill_self_input_8) + if mute_cross_input_layer_8_uncond: attention_modifiers_fake_negative.append(kill_cross_input_8) + + patch_parameters['attention_modifiers_positive'] = attention_modifiers_positive + patch_parameters['attention_modifiers_fake_negative'] = attention_modifiers_fake_negative + + if attn_mod_for_positive_operation is not None: + patch_parameters['attention_modifiers_positive'] = patch_parameters['attention_modifiers_positive'] + attn_mod_for_positive_operation + if attn_mod_for_negative_operation is not None: + patch_parameters['attention_modifiers_fake_negative'] = patch_parameters['attention_modifiers_fake_negative'] + attn_mod_for_negative_operation + if attn_mod_for_global_operation is not None: + patch_parameters["attention_modifiers_global_enabled"] = True + patch_parameters['attention_modifiers_global'] = attn_mod_for_global_operation + + patch_parameters["uncond_sigma_end"] = uncond_sigma_end + patch_parameters["fake_uncond_sigma_end"] = uncond_sigma_end + patch_parameters["automatic_cfg"] = "hard" if Auto_CFG else "None" + + if save_as_preset: + patch_parameters["save_as_preset"] = save_as_preset + patch_parameters["preset_name"] = preset_name + + advcfg = advancedDynamicCFG() + m = advcfg.patch(model, **patch_parameters)[0] + + return (m, parameters_as_string, ) + +class simpleDynamicCFGCustomAttentionPatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "Auto_CFG": ("BOOLEAN", {"default": True}), + "cond_mode" : (["replace_by_custom","normal+(normal-custom_cond)*multiplier","normal+(normal-custom_uncond)*multiplier"],), + "uncond_mode" : 
(["replace_by_custom","normal+(normal-custom_cond)*multiplier","normal+(normal-custom_uncond)*multiplier"],), + "cond_diff_multiplier": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.1, "round": 0.01}), + "uncond_diff_multiplier": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.1, "round": 0.01}), + "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000, "step": 0.1, "round": 0.01}), + "save_as_preset": ("BOOLEAN", {"default": False}), + "preset_name": ("STRING", {"multiline": False}), + }, + "optional":{ + "attn_mod_for_positive_operation": ("ATTNMOD", {"forceInput": True}), + "attn_mod_for_negative_operation": ("ATTNMOD", {"forceInput": True}), + }} + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("Model",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + + def patch(self, model, Auto_CFG, cond_mode, uncond_mode, cond_diff_multiplier, uncond_diff_multiplier, uncond_sigma_end, save_as_preset, preset_name, + attn_mod_for_positive_operation = [], attn_mod_for_negative_operation = []): + + with open(os.path.join(json_preset_path, "do_not_delete.json"), 'r', encoding='utf-8') as f: + patch_parameters = json.load(f) + + patch_parameters["cond_exp_value"] = cond_diff_multiplier + patch_parameters["uncond_exp_value"] = uncond_diff_multiplier + + if cond_mode != "replace_by_custom": + patch_parameters["disable_cond"] = False + if cond_mode == "normal+(normal-custom_cond)*multiplier": + patch_parameters["cond_exp_method"] = "subtract_attention_modifiers_input_using_cond" + elif cond_mode == "normal+(normal-custom_uncond)*multiplier": + patch_parameters["cond_exp_method"] = "subtract_attention_modifiers_input_using_uncond" + + if uncond_mode != "replace_by_custom": + patch_parameters["uncond_sigma_start"] = 1000.0 + patch_parameters["fake_uncond_exp"] = False + patch_parameters["uncond_exp"] = True + + if uncond_mode == "normal+(normal-custom_cond)*multiplier": + patch_parameters["uncond_exp_method"] = "subtract_attention_modifiers_input_using_cond" + elif uncond_mode == "normal+(normal-custom_uncond)*multiplier": + patch_parameters["uncond_exp_method"] = "subtract_attention_modifiers_input_using_uncond" + + if cond_mode != "replace_by_custom" and attn_mod_for_positive_operation != []: + smallest_sigma = min([float(x['sigma_end']) for x in attn_mod_for_positive_operation]) + patch_parameters["disable_cond_sigma_end"] = smallest_sigma + patch_parameters["cond_exp_sigma_end"] = smallest_sigma + + if uncond_mode != "replace_by_custom" and attn_mod_for_negative_operation != []: + smallest_sigma = min([float(x['sigma_end']) for x in attn_mod_for_negative_operation]) + patch_parameters["uncond_exp_sigma_end"] = smallest_sigma + patch_parameters["fake_uncond_start"] = False + # else: + # biggest_sigma = max([float(x['sigma_start']) for x in attn_mod_for_negative_operation]) + # patch_parameters["fake_uncond_sigma_start"] = biggest_sigma + + patch_parameters["automatic_cfg"] = "hard" if Auto_CFG else "None" + patch_parameters['attention_modifiers_positive'] = attn_mod_for_positive_operation + patch_parameters['attention_modifiers_negative'] = attn_mod_for_negative_operation + patch_parameters['attention_modifiers_fake_negative'] = attn_mod_for_negative_operation + patch_parameters["uncond_sigma_end"] = uncond_sigma_end + patch_parameters["fake_uncond_sigma_end"] = uncond_sigma_end + patch_parameters["save_as_preset"] = save_as_preset + patch_parameters["preset_name"] = preset_name + + advcfg = 
advancedDynamicCFG() + m = advcfg.patch(model, **patch_parameters)[0] + + return (m, ) + + + + +class attentionModifierSingleLayerBypassNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "block_name": (["input","middle","output"],), + "block_number": ("INT", {"default": 0, "min": 0, "max": 12, "step": 1}), + "unet_attn": (["attn1","attn2","both"],), + }, + "optional":{ + "join_parameters": ("ATTNMOD", {"forceInput": True}), + }} + + RETURN_TYPES = ("ATTNMOD","STRING",) + RETURN_NAMES = ("Attention modifier", "Parameters as string") + FUNCTION = "exec" + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + + def exec(self, sigma_start, sigma_end, block_name, block_number, unet_attn, join_parameters=None): + attn_modifier_dict = { + "sigma_start": sigma_start, "sigma_end": sigma_end, + "self_attn_mod_eval": "q", + "unet_block_id_input": str(block_number) if block_name == "input" else "", + "unet_block_id_middle": str(block_number) if block_name == "middle" else "", + "unet_block_id_output": str(block_number) if block_name == "output" else "", + "unet_attn": f"{unet_attn}" + } + + info_string = "\n".join([f"{k}: {v}" for k,v in attn_modifier_dict.items() if v != ""]) + + if unet_attn == "both": + attn_modifier_dict['unet_attn'] = "attn1" + copy_attn_modifier_dict = attn_modifier_dict.copy() + copy_attn_modifier_dict['unet_attn'] = "attn2" + out_modifiers = [attn_modifier_dict, copy_attn_modifier_dict] + else: + out_modifiers = [attn_modifier_dict] + + return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, ) + +class attentionModifierSingleLayerTemperatureNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}), + "block_name": (["input","middle","output"],), + "block_number": ("INT", {"default": 0, "min": 0, "max": 12, "step": 1}), + "unet_attn": (["attn1","attn2","both"],), + "temperature": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.01, "round": 0.01}), + }, + "optional":{ + "join_parameters": ("ATTNMOD", {"forceInput": True}), + }} + + RETURN_TYPES = ("ATTNMOD","STRING",) + RETURN_NAMES = ("Attention modifier", "Parameters as string") + FUNCTION = "exec" + CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers" + + def exec(self, sigma_start, sigma_end, block_name, block_number, unet_attn, temperature, join_parameters=None): + attn_modifier_dict = { + "sigma_start": sigma_start, "sigma_end": sigma_end, + "self_attn_mod_eval": f"temperature_patcher({temperature}).attention_basic_with_temperature(q, k, v, extra_options)", + "unet_block_id_input": str(block_number) if block_name == "input" else "", + "unet_block_id_middle": str(block_number) if block_name == "middle" else "", + "unet_block_id_output": str(block_number) if block_name == "output" else "", + "unet_attn": f"{unet_attn}" + } + + info_string = "\n".join([f"{k}: {v}" for k,v in attn_modifier_dict.items() if v != ""]) + + if unet_attn == "both": + attn_modifier_dict['unet_attn'] = "attn1" + copy_attn_modifier_dict = attn_modifier_dict.copy() + copy_attn_modifier_dict['unet_attn'] = "attn2" + out_modifiers = 
[attn_modifier_dict, copy_attn_modifier_dict] + else: + out_modifiers = [attn_modifier_dict] + + return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, ) + +class uncondZeroNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "scale": ("FLOAT", {"default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01, "round": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches/Automatic_CFG" + + def patch(self, model, scale): + def custom_patch(args): + cond_pred = args["cond_denoised"] + input_x = args["input"] + if args["sigma"][0] <= 1: + return input_x - cond_pred + cond = input_x - cond_pred + uncond = input_x - torch.zeros_like(cond) + return uncond + scale * (cond - uncond) + + m = model.clone() + m.set_model_sampler_cfg_function(custom_patch) + return (m, ) diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes_sag_custom.py b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes_sag_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..85ef8958ec1341a4c54f96b942145d255e0e7481 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/nodes_sag_custom.py @@ -0,0 +1,190 @@ +import torch +from torch import einsum +import torch.nn.functional as F +import math + +from einops import rearrange, repeat +import os +from comfy.ldm.modules.attention import optimized_attention, _ATTN_PRECISION +import comfy.samplers + +# from comfy/ldm/modules/attention.py +# but modified to return attention scores as well as output +def attention_basic_with_sim(q, k, v, heads, mask=None): + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + + h = heads + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) + + # force cast to fp32 to avoid overflowing + if _ATTN_PRECISION =="fp32": + sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale + else: + sim = einsum('b i d, b j d -> b i j', q, k) * scale + + del q, k + + if mask is not None: + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + sim = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) + out = ( + out.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) + return (out, sim) + +def create_blur_map(x0, attn, sigma=3.0, threshold=1.0): + # reshape and GAP the attention map + _, hw1, hw2 = attn.shape + b, _, lh, lw = x0.shape + attn = attn.reshape(b, -1, hw1, hw2) + # Global Average Pool + mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold + ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length() + mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)] + + # Reshape + mask = ( + mask.reshape(b, *mid_shape) + .unsqueeze(1) + .type(attn.dtype) + ) + # Upsample + mask = F.interpolate(mask, (lh, lw)) + + blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma) + blurred = blurred * mask + x0 * (1 - mask) + return blurred + +def gaussian_blur_2d(img, kernel_size, sigma): + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + + x_kernel = pdf / pdf.sum() + x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) + + kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) + kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) + + padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] + + img = F.pad(img, padding, mode="reflect") + img = F.conv2d(img, kernel2d, groups=img.shape[-3]) + return img + +def get_denoised_ranges(latent, measure="hard", top_k=0.25): + chans = [] + for x in range(len(latent)): + max_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=True).values + min_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=False).values + max_val = torch.mean(max_values).item() + min_val = torch.mean(torch.abs(min_values)).item() if (measure == "hard" or measure == "range") else abs(torch.mean(min_values).item()) + denoised_range = (max_val + min_val) / 2 + chans.append(denoised_range) + return chans + +class SelfAttentionGuidanceCustom: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 100.0, "step": 0.1}), + "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}), + "sigma_start": ("FLOAT", {"default": 15.0, "min": 0.0, "max": 1000.0, "step": 0.1, "round": 0.1}), + "sigma_end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.1, "round": 0.1}), + "auto_scale" : ("BOOLEAN", {"default": False}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "model_patches" + + def patch(self, model, scale, blur_sigma, sigma_start, sigma_end, auto_scale): + m = model.clone() + + attn_scores = None + + # TODO: make this work properly with chunked batches + # currently, we can only save the attn from one UNet call + def attn_and_record(q, k, v, extra_options): + nonlocal attn_scores + # if uncond, save the attention scores + heads = extra_options["n_heads"] + cond_or_uncond = extra_options["cond_or_uncond"] + b = q.shape[0] // len(cond_or_uncond) + if 1 in cond_or_uncond: + 
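
# Aside: gaussian_blur_2d above builds its 2D kernel as the outer product of a
# 1D Gaussian and runs a single depthwise conv2d. Since a Gaussian kernel is
# separable, that is equivalent to blurring rows and then columns with the 1D
# kernel -- a quick self-check sketch (blur_1d_twice is not from the repo):
import torch
import torch.nn.functional as F

def blur_1d_twice(img, x_kernel):
    c = img.shape[-3]
    pad = x_kernel.numel() // 2
    k_row = x_kernel.view(1, 1, 1, -1).expand(c, 1, 1, -1)  # blur along width
    k_col = x_kernel.view(1, 1, -1, 1).expand(c, 1, -1, 1)  # blur along height
    img = F.pad(img, [pad, pad, 0, 0], mode="reflect")
    img = F.conv2d(img, k_row, groups=c)
    img = F.pad(img, [0, 0, pad, pad], mode="reflect")
    return F.conv2d(img, k_col, groups=c)

# For x of shape (1, C, H, W) and x_kernel built as in gaussian_blur_2d
# (9 taps, sigma 2.0, normalized), torch.allclose(blur_1d_twice(x, x_kernel),
# gaussian_blur_2d(x, 9, 2.0), atol=1e-5) should hold.
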
uncond_index = cond_or_uncond.index(1) + # do the entire attention operation, but save the attention scores to attn_scores + (out, sim) = attention_basic_with_sim(q, k, v, heads=heads) + # when using a higher batch size, I BELIEVE the result batch dimension is [uc1, ... ucn, c1, ... cn] + n_slices = heads * b + attn_scores = sim[n_slices * uncond_index:n_slices * (uncond_index+1)] + return out + else: + return optimized_attention(q, k, v, heads=heads) + + def post_cfg_function(args): + nonlocal attn_scores + uncond_attn = attn_scores + + sag_scale = scale + sag_sigma = blur_sigma + sag_threshold = 1.0 + model = args["model"] + uncond_pred = args["uncond_denoised"] + uncond = args["uncond"] + cfg_result = args["denoised"] + sigma = args["sigma"] + model_options = args["model_options"] + x = args["input"] + if uncond_pred is None or uncond is None or uncond_attn is None: + return cfg_result + if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding + return cfg_result + if sigma[0] > sigma_start or sigma[0] < sigma_end: + return cfg_result + # create the adversarially blurred image + degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold) + degraded_noised = degraded + x - uncond_pred + # call into the UNet + (sag, _) = comfy.samplers.calc_cond_batch(model, [uncond, None], degraded_noised, sigma, model_options) + # comfy.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options) + + if auto_scale: + denoised_tmp = cfg_result + (degraded - sag) * 8 + for b in range(len(denoised_tmp)): + denoised_ranges = get_denoised_ranges(denoised_tmp[b]) + for c in range(len(denoised_tmp[b])): + fixed_scale = (sag_scale / 10) / denoised_ranges[c] + denoised_tmp[b][c] = cfg_result[b][c] + (degraded[b][c] - sag[b][c]) * fixed_scale + return denoised_tmp + + return cfg_result + (degraded - sag) * sag_scale + + m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=False) + + # from diffusers: + # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch + m.set_model_attn1_replace(attn_and_record, "middle", 0, 0) + + return (m, ) \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/A subtle touch.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/A subtle touch.json new file mode 100644 index 0000000000000000000000000000000000000000..682379eaa467a6893b2e39800f66deceb53fee1b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/A subtle touch.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.sin()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "attention_modifiers_input_using_cond", 
"cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 15.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 1.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 1.json new file mode 100644 index 0000000000000000000000000000000000000000..c278a6f306200eb15743ea2f64281bcb8fec8bc4 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 1.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": 
"torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "cond_pred", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 1.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 2.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 2.json new file mode 100644 index 0000000000000000000000000000000000000000..03492d223d1a92d4e7b6a152411069498a8a607b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 2.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", 
"unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 3.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "cond_pred", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 3.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 1.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 3.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 3.json new file mode 100644 index 0000000000000000000000000000000000000000..cb6df8adf12af4d039318989bb9f93922e6eac5c --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Crossed conds customized 3.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, 
"self_attn_mod_eval": "v", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "cond_pred", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Enhanced_details_and_tweaked_attention b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Enhanced_details_and_tweaked_attention new file mode 100644 index 0000000000000000000000000000000000000000..fd910068eae69caa1f614ca82709175dc5ca8c82 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Enhanced_details_and_tweaked_attention @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "-q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": 
"", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": true, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 15.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "cond_pred", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 15.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": true, "uncond_exp_sigma_end": 0.4, "uncond_exp_sigma_start": 15.0, "uncond_exp_value": 0.5, "uncond_sigma_end": 0.4, "uncond_sigma_start": 15.0} diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Excellent_attention.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Excellent_attention.json new file mode 100644 index 0000000000000000000000000000000000000000..c953aca9d95c77426be714d3e63be0e496b1bf54 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Excellent_attention.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(q+(q-attention_basic(attnbc, k, v, extra_options['n_heads'])))*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(q+(q-attention_basic(attnbc, k, v, extra_options['n_heads'])))*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, 
"fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/For magic.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/For magic.json new file mode 100644 index 0000000000000000000000000000000000000000..212d64dd706b057aa3a9d00a891844826e1d5d4f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/For magic.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.sin()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q/2", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 2.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 15.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Kickstart.json 
b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Kickstart.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9349de55e3e5c90d4331c0de434a2879bf95ace
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Kickstart.json
@@ -0,0 +1 @@
+{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 7, "self_attn_mod_eval": "attnbc*2", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 7, "sigma_end": 2, "self_attn_mod_eval": "attnbc*2", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 7, "self_attn_mod_eval": "attnbc*2", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 7, "sigma_end": 2, "self_attn_mod_eval": "attnbc*2", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 0.0, "uncond_sigma_start": 150.0}
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 and attn2 for uncond.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 and attn2 for uncond.json
new file mode 100644
index 0000000000000000000000000000000000000000..dea0994458411326b777c853571d7197822b7f0d
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 and attn2 for uncond.json
@@ -0,0 +1,72 @@
+{"lerp_uncond_sigma_start": 15.0,
+"lerp_uncond_sigma_end": 1.0,
+"subtract_latent_mean": false,
+"subtract_latent_mean_sigma_start": 15.0,
+"subtract_latent_mean_sigma_end": 1.0,
+"latent_intensity_rescale": false,
+"latent_intensity_rescale_sigma_start": 15.0,
+"latent_intensity_rescale_sigma_end": 3.0,
+"ignore_pre_cfg_func": false,
+"auto_cfg_topk": 0.25,
+"attention_modifiers_global_enabled": false,
+"attention_modifiers_global": [],
+"disable_cond": false,
+"disable_cond_sigma_start": 1000.0,
+"disable_cond_sigma_end": 0.0,
+"kwargs": {},
+"attention_modifiers_fake_negative": [{"sigma_start": 1000,
+"sigma_end": 0,
+"self_attn_mod_eval": "torch.zeros_like(q)",
+"unet_block_id_input": "8",
+"unet_block_id_middle": "",
+"unet_block_id_output": "",
+"unet_attn": "attn1"},
+{"sigma_start": 1000,
+"sigma_end": 0,
+"self_attn_mod_eval": "torch.zeros_like(q)",
+"unet_block_id_input": "8",
+"unet_block_id_middle": "",
+"unet_block_id_output": "",
+"unet_attn": "attn2"}],
+"attention_modifiers_negative": [],
+"attention_modifiers_positive": [],
+"auto_cfg_ref": 8.0,
+"automatic_cfg": "hard",
+"cond_exp": false,
+"cond_exp_method": "attention_modifiers_input_using_cond",
+"cond_exp_normalize": false,
+"cond_exp_sigma_end": 0.0,
+"cond_exp_sigma_start": 1000.0,
+"cond_exp_value": 0.3333333333333333,
+"eval_string_cond": "",
+"eval_string_fake": "",
+"eval_string_uncond": "",
+"fake_uncond_exp": true,
+"fake_uncond_exp_method": "attention_modifiers_input_using_uncond",
+"fake_uncond_exp_normalize": false,
+"fake_uncond_exp_value": 1000.0,
+"fake_uncond_multiplier": 1,
+"fake_uncond_sigma_end": 0.0,
+"fake_uncond_sigma_start": 1000.0,
+"fake_uncond_start": true,
+"latent_intensity_rescale_cfg": 8.0,
+"latent_intensity_rescale_method": "hard",
+"lerp_uncond": false,
+"lerp_uncond_strength": 2.0,
+"not_in_filter": ["self",
+"model",
+"args",
+"args_filter",
+"save_as_preset",
+"preset_name",
+"model_options_copy",
+"eval_string"],
+"skip_uncond": true,
+"uncond_exp": false,
+"uncond_exp_method": "amplify",
+"uncond_exp_normalize": false,
+"uncond_exp_sigma_end": 0.0,
+"uncond_exp_sigma_start": 1000.0,
+"uncond_exp_value": 0.3333333333333333,
+"uncond_sigma_end": 0.0,
+"uncond_sigma_start": 0.0}
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 for uncond.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 for uncond.json
new file mode 100644
index 0000000000000000000000000000000000000000..16397575a13c24a1624fbb0fe6b7b9f466b36957
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn1 for uncond.json
@@ -0,0 +1 @@
+{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0}
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn2 for uncond.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn2 for uncond.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c4f831bbd496510bfabdaa14cd75ff620823dc3
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Mute input layer 8 attn2 for uncond.json
@@ -0,0 +1 @@
+{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0}
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (new version).json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (new version).json
new file mode 100644
index
0000000000000000000000000000000000000000..bad049f38831e3642f5bbec5de1ea3b4c7ebe063 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (new version).json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (old version).json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (old version).json new file mode 100644 index 0000000000000000000000000000000000000000..04f8851047e1d868376af94d76ddc88395321565 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Original automatic CFG (old version).json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "soft", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", 
"fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Potato Attention Guidance.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Potato Attention Guidance.json new file mode 100644 index 0000000000000000000000000000000000000000..e25d4c619f8d44897a89f2407b29501ece89dbde --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Potato Attention Guidance.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q*k*v", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 2.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 15.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expert.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expert.json new file mode 100644 index 0000000000000000000000000000000000000000..59d00e2d66c8ae4df0749849c3cf6c419355d9c2 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expert.json @@ -0,0 +1 @@ 
+{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 2.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 15.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expertNegative.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expertNegative.json new file mode 100644 index 0000000000000000000000000000000000000000..f7f22f1a430d240c2cacb89b2db8f5b87c9c568b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/Quack expertNegative.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, 
"cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 2.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Analog_photo_helper.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Analog_photo_helper.json new file mode 100644 index 0000000000000000000000000000000000000000..23191c776b6ab930da101b918bc79224cab61f1b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Analog_photo_helper.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "2", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "2", "unet_attn": "attn2"}], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, 
"uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Photorealistic_helper.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Photorealistic_helper.json new file mode 100644 index 0000000000000000000000000000000000000000..c1f23fd4bb2d97fd60d5f06fc6e3f97dd1483cfe --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Photorealistic_helper.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "attnopt*1.2", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "attnopt*1.2", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "3", "unet_attn": "attn2"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "attnopt*0.9", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "4,5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "attnopt*0.9", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "4,5", "unet_attn": "attn2"}], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_TOO_MANY_FINGERS.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_TOO_MANY_FINGERS.json new file mode 100644 index 
0000000000000000000000000000000000000000..6664bcc720a7da0150f15dce52d88027941dc48f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_TOO_MANY_FINGERS.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "attnopt + (\nattnopt - (\nattnfunc(blur_tensor(q), k, v, extra_options['n_heads']) + v-blur_tensor(v)\n)\n) * 0.5", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Vector_Art.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Vector_Art.json new file mode 100644 index 0000000000000000000000000000000000000000..467e446c9d88896e06f641606e7aa39c12f0b8c9 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/SDXL_Vector_Art.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": 
true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q*1.5", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "2", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q*1.5", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "2", "unet_attn": "attn2"}], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "amplify", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/The red riding latent.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/The red riding latent.json new file mode 100644 index 0000000000000000000000000000000000000000..55e3802c186f09cffbb05ad5ca9fa081fbba975f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/The red riding latent.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "2,7", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": 
"", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q/2", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "8", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "v/2", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_negative": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "v.abs().exp()*v.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_positive": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 3, "self_attn_mod_eval": "q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "1", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "5,7", "unet_block_id_middle": "", "unet_block_id_output": "2", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.abs().exp()*q.sign()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "4", "unet_attn": "attn2"}, {"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q/2", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "7", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/do_not_delete.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/do_not_delete.json new file mode 100644 index 0000000000000000000000000000000000000000..3384aa8eec0550be4fcc96a82e9fb2b59b69c126 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/do_not_delete.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, 
"disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/everything_turned_off.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/everything_turned_off.json new file mode 100644 index 0000000000000000000000000000000000000000..309257c7411e5118929056389db590d286223833 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/everything_turned_off.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "None", "cond_exp": false, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 0.0, "uncond_sigma_start": 150.0} \ No newline at end of file diff --git 
a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/experimental_temperature_setting.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/experimental_temperature_setting.json new file mode 100644 index 0000000000000000000000000000000000000000..ff25a27999037655826ab5362cb19a8fe8d479d1 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/experimental_temperature_setting.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(0.9).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(0.85).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "7", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(0.9).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(0.85).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "7", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(2).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "temperature_patcher(0.85).attention_basic_with_temperature(q, k, v, extra_options)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn1"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 
1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_fast_version.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_fast_version.json new file mode 100644 index 0000000000000000000000000000000000000000..11875e2cb2ada84bde73e057836da626db3085d9 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_fast_version.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": true, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "normalize_tensor(attnbc+(attnbc-q)*0.3)*attnbc.norm()", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "None", "cond_exp": true, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, 
"fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.5, "uncond_sigma_end": 0.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_normal_version.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_normal_version.json new file mode 100644 index 0000000000000000000000000000000000000000..d251da1ab5a537d1c2ddf80f736873fa38880edc --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_normal_version.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "None", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": true, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.5, "uncond_sigma_end": 1.0, "uncond_sigma_start": 0.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version.json 
b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version.json new file mode 100644 index 0000000000000000000000000000000000000000..dc252e66ae5d1c7a472ac8ca7130242f3265554e --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "None", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.5, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.5, "uncond_sigma_end": 0.0, "uncond_sigma_start": 1000.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version_lighter_effect.json b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version_lighter_effect.json new file mode 100644 index 
0000000000000000000000000000000000000000..a4229487b9ee6661d8eae5921cf2a9e1000f8ee1 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/presets/reinforced_style_slow_version_lighter_effect.json @@ -0,0 +1 @@ +{"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": false, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {"kwargs": {}, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"]}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "q", "unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": "5", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "None", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1000.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3, "uncond_sigma_end": 0.0, "uncond_sigma_start": 1000.0} \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/pyproject.toml b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..f5625d9079e345049a6e57f5a050fa377901c044 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/pyproject.toml @@ -0,0 +1,15 @@ +[project] 
+name = "comfyui-automaticcfg" +description = "My own version 'from scratch' of a self-rescaling CFG. It isn't much but it's honest work.\nTLDR: set your CFG at 8 to try it. No burned images and artifacts anymore. CFG is also a bit more sensitive because it's a proportion around 8. Low scale like 4 also gives really nice results since your CFG is not the CFG anymore. Also in general even with relatively low settings it seems to improve the quality." +version = "1.0.0" +license = "LICENSE" +dependencies = ["colorama"] + +[project.urls] +Repository = "https://github.com/Extraltodeus/ComfyUI-AutomaticCFG" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "extraltodeus" +DisplayName = "ComfyUI-AutomaticCFG" +Icon = "" diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/requirements.txt b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d90aaa5fcacf1730f7ace07e576ba9bff7bc562 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/requirements.txt @@ -0,0 +1 @@ +colorama \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SD15 AYS Warp drive workflow.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SD15 AYS Warp drive workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..d0d7a43f6a9c07aa6d58d410fc6f70dab4bf538d Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SD15 AYS Warp drive workflow.png differ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SDXL AYS Warp drive variation.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SDXL AYS Warp drive variation.png new file mode 100644 index 0000000000000000000000000000000000000000..557fa91f391b345169e4892a4cc650420e501ab7 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/10 steps SDXL AYS Warp drive variation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0389754312423e7fd35c5b138ce3e6418bf849b4fe50ea690143396e0a9707e +size 1395811 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/11728UI_00001_.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/11728UI_00001_.png new file mode 100644 index 0000000000000000000000000000000000000000..b8d2ae5a6aeb95b0c948cf56fc66117ac87bf449 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/11728UI_00001_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72b4be95fcda15ad2fe15f220ea44fa649093c3db4058939ecf527df2f7c569a +size 1520172 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12 steps SDXL AYS Warp drive workflow.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12 steps SDXL AYS Warp drive workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..e3f5e85ba2ffb8283f8a5b2623e3958bdc36ec11 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12 steps SDXL AYS Warp drive workflow.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:651347f0c94c532e6c01084bc9162a24e9f4d74efaf749e4c65a72ba1fa9a377 +size 1412765 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12steps.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12steps.png new file mode 100644 index 0000000000000000000000000000000000000000..78f582ca3b9c599a876fc25c89e1f24d87f77750 --- /dev/null +++ 
b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/12steps.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abdfae1e1cd0766a31b8b150973e2edb05fe7bb71be48351690cf496ad712507 +size 2270171 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/24steps.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/24steps.png new file mode 100644 index 0000000000000000000000000000000000000000..49da45f2cb071c9237fccb04b1b1f1b2cdb7a219 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/24steps.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9927b71d332ac1e4b45d379d5e440edc63da7e879ae82269e978b0277ec080dc +size 2313232 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/00382UI_00001_.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/00382UI_00001_.png new file mode 100644 index 0000000000000000000000000000000000000000..3712cae2e8bef52b879a858669767a9952954624 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/00382UI_00001_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26a9580911306c695f70518628eccb8b13a87d6fd0cf97aba71134f5662b9184 +size 1607890 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01207UI_00001_.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01207UI_00001_.png new file mode 100644 index 0000000000000000000000000000000000000000..dec3f9e4f21e93916ca1ba23ece655b9313ee8c5 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01207UI_00001_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3b7173383ca3d7add7d4c6432c339e2fea40f8b83f2be94754ad82ea774ee2a +size 1156512 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01217UI_00001_.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01217UI_00001_.png new file mode 100644 index 0000000000000000000000000000000000000000..d7310ea6fb04a7c889f95756e73773348e00b692 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/01217UI_00001_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73294481ddeb97776cd3253267c8520c581ce800f463f0d159df7ca17b5da910 +size 1124343 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/a bad upscale looks like low quality jpeg.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/a bad upscale looks like low quality jpeg.png new file mode 100644 index 0000000000000000000000000000000000000000..51d12dd6b31af697b993b0cac207cdd4138cd3f0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/a bad upscale looks like low quality jpeg.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a206f762fa1c639168d7093a5c583f68e45d4a2b910d7b7ac03c13fac69fc2e8 +size 1763471 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/another bad upscale looking like jpeg.png 
b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/another bad upscale looking like jpeg.png new file mode 100644 index 0000000000000000000000000000000000000000..409569c10e94fd53c0c5e88ecea0fed4c0f92e88 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/another bad upscale looking like jpeg.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:119b03c3f8b2a6ebbe9c1e9ae2f4ddfaccb371d4946e1f72eaa9f77273cfc959 +size 1658386 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/intradasting.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/intradasting.png new file mode 100644 index 0000000000000000000000000000000000000000..169c3b9dc728852bdf9b0f2046173dcdbbcdd909 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/intradasting.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f40f50652e976a92ec0c58585e1ad70d74102c2efff99bf3acb1d0bee538008 +size 1154874 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/laule.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/laule.png new file mode 100644 index 0000000000000000000000000000000000000000..aa2fd29bb2a002069ef4223b6baa670cd05d6332 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/laule.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35e9980c5b9d582133db88e611356b6107a1a6fc6c3616e0a0b4f6543b3cfa30 +size 1083336 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/niiiiiice.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/niiiiiice.png new file mode 100644 index 0000000000000000000000000000000000000000..a3672b5cbd3af44191ad92f29dfb3262711c8ae0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/niiiiiice.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebcb2bc2670328a5b2440281a66b1816d874b3b51e53fe1e7028c908b92f966f +size 1067735 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/special double pass.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/special double pass.png new file mode 100644 index 0000000000000000000000000000000000000000..6f5966362b80fcf8fb5685ec810b2d81031e35d6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/special double pass.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:762d629eda2a233d10eae3a12f2fa47b1d749f645a30218ba99b34ec73b4884d +size 1198797 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/web.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few here that I find nice/web.png new file mode 100644 index 0000000000000000000000000000000000000000..3aaec0044bb98a6b051edcffc6b75603d00a1885 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/I'm just throwing a few 
here that I find nice/web.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba4d39e812c64be8d4e2ec91415e88ca1a1146bb35b7726719ad799076bcd85 +size 1033299 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/My current go-to settings.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/My current go-to settings.png new file mode 100644 index 0000000000000000000000000000000000000000..92b9c3283dcc94d25ae7c5b97527bd4b88cf5720 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/My current go-to settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3bf7249dcfd7d7f52a75016480f827f4fd161e7ad62fa3fc751349c9d3722a9 +size 1469959 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/README.md b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/README.md new file mode 100644 index 0000000000000000000000000000000000000000..67ca9b0ce6c4d29ba64f125069cba8d0c8b52fb9 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/README.md @@ -0,0 +1,47 @@ +# Workflows + +## 10 steps + +### SD 1.5 + +![one](./10%20steps%20SD15%20AYS%20Warp%20drive%20workflow.png) + +### SDXL Warp Drive + +![two](./10%20steps%20SDXL%20AYS%20Warp%20drive%20variation.png) + +## 12 Steps + +![12steps](./12steps.png) + +### SDXL + +![12 steps SDXL](./12%20steps%20SDXL%20AYS%20Warp%20drive%20workflow.png) + +## I don't know what this one is + +![shruggie](./11728UI_00001_.png) + +## Attention Modifiers + +![attention modifiers explanations](./attention_modifiers_explainations.png) + +## Dat Random Gurl + +![dat random gurl](./dat_random_gurl.png) + +## Go-to Settings + +![Current go-to settings](./My%20current%20go-to%20settings.png) + +## Potato Attention Guidance + +![potato attention guidance](./potato%20attention%20guidance.png) + +## Simple SD Upscale + +![simple SD upscale](./simple%20SD%20upscale.png) + +## Start by this one + +![Start](./Start_by_this_one.png) \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/Start_by_this_one.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/Start_by_this_one.png new file mode 100644 index 0000000000000000000000000000000000000000..8fd0e3189d78e1d1035e625a1a54aa617ac05ee5 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/Start_by_this_one.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c17f55afa41464b9ea50f69dd70bc640256e3a41c04c70bcb50f65695743f4d5 +size 1207763 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/attention_modifiers_explainations.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/attention_modifiers_explainations.png new file mode 100644 index 0000000000000000000000000000000000000000..e28166195860e09c6df3401dc1338d4e0991b3fe --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/attention_modifiers_explainations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe6da80321c9bb5a3b9de69ce61d9bb506223620a935c490cd274d2027e1d9a2 +size 1401212 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/dat_random_gurl.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/dat_random_gurl.png new file mode 100644 index 0000000000000000000000000000000000000000..b54fcd83ac7ca7c31996a05596d1234404fde40b Binary
files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/dat_random_gurl.png differ diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/if you need the write text to image node.txt b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/if you need the write text to image node.txt new file mode 100644 index 0000000000000000000000000000000000000000..5a9910e4910b8b1df8c588a68ca72ea20e6b602b --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/if you need the write text to image node.txt @@ -0,0 +1,7 @@ +It's here: + +https://github.com/Extraltodeus/temp/blob/main/image_PIL_text.py + +Also you will need to create a folder named "fonts" in your main ComfyUI folder and add at least one. +Roboto is nice: +https://fonts.google.com/specimen/Roboto diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/potato attention guidance.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/potato attention guidance.png new file mode 100644 index 0000000000000000000000000000000000000000..a545fd4fa264acf1747c3c52027cb90282a1bf5a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/potato attention guidance.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c937bd2a4567639f3580da8508f926a52fb63b865fcf99e654b17d5ed82b771f +size 1238998 diff --git a/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/simple SD upscale.png b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/simple SD upscale.png new file mode 100644 index 0000000000000000000000000000000000000000..d2709f57b3d3e6324d784bde34936547d95e2dd9 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-AutomaticCFG/workflows/simple SD upscale.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83dc24539848be14faca8137d717596334a10a710a72f1672e621ef34913a13c +size 3332239 diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/LICENSE b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d14feaabbe07990aba4f77e894bb8eddacae82bf --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 pythongosssss + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/README.md b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..16855e73e22e1d7a5d151e871724113b0dde02e6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/README.md @@ -0,0 +1,402 @@ +# ComfyUI-Custom-Scripts + +### ⚠️ While these extensions work for the most part, I'm very busy at the moment and so unable to keep on top of everything here; thanks for your patience! + +# Installation + +1. Clone the repository: +`git clone https://github.com/pythongosssss/ComfyUI-Custom-Scripts.git` +to your ComfyUI `custom_nodes` directory + + The script will then automatically install all custom scripts and nodes. + It will attempt to use symlinks and junctions to prevent having to copy files and keep them up to date. + +- For uninstallation: + - Delete the cloned repo in `custom_nodes` + - Ensure `web/extensions/pysssss/CustomScripts` has also been removed + +# Update +1. Navigate to the cloned repo e.g. `custom_nodes/ComfyUI-Custom-Scripts` +2. `git pull` + +# Features + +## Autocomplete +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/b5971135-414f-4f4e-a6cf-2650dc01085f) +Provides embedding and custom word autocomplete. You can view embedding details by clicking on the info icon on the list. +Define your list of custom words via the settings. +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/160ef61c-7d7e-49d0-b60f-5a1501b74c9d) +You can quickly default to danbooru tags using the Load button, or load/manage other custom word lists. +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/cc180b35-5f45-442f-9285-3ddf3fa320d0) + +## Auto Arrange Graph +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/04b06081-ca6f-4c0f-8584-d0a157c36747) +Adds a menu option to auto arrange the graph in order of execution; note that this makes very wide graphs! + +## Always Snap to Grid +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/66f36d1f-e579-4959-9880-9a9624922e3a) +Adds a setting to make moving nodes always snap to grid. + +## [Testing] "Better" Loader Lists +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/664caa71-f25f-4a96-a04a-1466d6b2b8b4) +Adds custom Lora and Checkpoint loader nodes that can show preview images: just place a png or jpg next to the file and it'll display in the list on hover (e.g. sdxl.safetensors and sdxl.png). +Optionally enable subfolders via the settings: +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/e15b5e83-4f9d-4d57-8324-742bedf75439) +Adds an "examples" widget to load sample prompts, triggerwords, etc: +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ad1751e4-4c85-42e7-9490-e94fb1cbc8e7) +These should be stored in a folder matching the name of the model, e.g.
if it is `loras/add_detail.safetensors`, put your files in `loras/add_detail/*.txt`. +To quickly save a generated image as the preview for a model, right click on an image on a node, select Save as Preview and choose the model to save the preview for: +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/9fa8e9db-27b3-45cb-85c2-0860a238fd3a) + +## Checkpoint/LoRA/Embedding Info +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/6b67bf40-ee17-4fa6-a0c1-7947066bafc2) +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/32405df6-b367-404f-a5df-2d4347089a9e) +Adds a "View Info" menu option to view details about the selected LoRA or Checkpoint. To view embedding details, click the info button when using embedding autocomplete. + +## Constrain Image +Adds a node for resizing an image to a max & min size, optionally cropping if required. + +## Custom Colors +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/fa7883f3-f81c-49f6-9ab6-9526e4debab6) +Adds a custom color picker to nodes & groups + +## Favicon Status +![image](https://user-images.githubusercontent.com/125205205/230171227-31f061a6-6324-4976-bed9-723a87500cf3.png) +![image](https://user-images.githubusercontent.com/125205205/230171445-c7202a45-b511-4d69-87fa-945ad44c063f.png) +Adds a favicon and title to the window; the favicon changes color while generating, and the window title includes the number of prompts in the queue + +## Image Feed +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/caea0d48-85b9-4ca9-9771-5c795db35fbc) +Adds a panel showing images that have been generated in the current session. You can control the direction that images are added and the position of the panel via the ComfyUI settings screen, and the size of the panel and the images via the sliders at the top of the panel. +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ca093d38-41a3-4647-9223-5bd0b9ee4f1e) + +## KSampler (Advanced) denoise helper +Provides a simple method to set custom denoise on the advanced sampler +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/42946bd8-0078-4c7a-bfe9-7adb1382b5e2) +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7cfccb22-f155-4848-934b-a2b2a6efe16f) + +## Lock Nodes & Groups +![image](https://user-images.githubusercontent.com/125205205/230172868-5c5a943c-ade1-4799-bf80-cc931da5d4b2.png) +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/cfca09d9-38e5-4ecd-8b73-1455009fcd67) +Adds a lock option to nodes & groups that prevents you from moving them until unlocked + +## Math Expression +Allows for evaluating complex expressions using values from the graph. You can input `INT`, `FLOAT`, `IMAGE` and `LATENT` values. +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/1593edde-67b8-45d8-88cb-e75f52dba039) +Other nodes' values can be referenced via the `Node name for S&R` property (set via the `Properties` menu item on a node) or the node title. +Supported operators: `+ - * /` (basic ops) `//` (floor division) `**` (power) `^` (xor) `%` (mod) +Supported functions: `round(num, dp?)` `floor(num)` `ceil(num)` `randomint(min,max)` +If using a `LATENT` or `IMAGE` you can get the dimensions using `a.width` or `a.height` where `a` is the input name.
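+As a rough illustration of how such an expression can be evaluated safely, here is a minimal sketch of the whitelist-based `ast` walk that `py/math_expression.py` (later in this diff) builds on; the real node additionally supports comparisons, functions and `.width`/`.height` attribute access:
+
+```python
+import ast
+import operator as op
+
+# Only whitelisted operators may run, so arbitrary Python can never execute.
+OPS = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
+       ast.Div: op.truediv, ast.FloorDiv: op.floordiv, ast.Pow: op.pow}
+
+def safe_eval(expression, **variables):
+    def walk(node):
+        if isinstance(node, ast.Constant):  # a literal number
+            return node.value
+        if isinstance(node, ast.Name):      # an input such as a, b or c
+            return variables[node.id]
+        if isinstance(node, ast.BinOp):     # e.g. a * 2
+            return OPS[type(node.op)](walk(node.left), walk(node.right))
+        raise TypeError(f"Unsupported syntax: {type(node).__name__}")
+    return walk(ast.parse(expression, mode="eval").body)
+
+print(safe_eval("a * 2 + b // 3", a=512, b=100))  # 1057
+```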
+ +## Node Finder +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/177d2b67-acbc-4ec3-ab31-7c295a98c194) +Adds a menu item for following/jumping to the executing node, and a menu to quickly go to a node of a specific type. + +## Preset Text +![image](https://user-images.githubusercontent.com/125205205/230173939-08459efc-785b-46da-93d1-b02f0300c6f4.png) +Adds a node that lets you save and use text presets (e.g. for your 'normal' negatives) + +## Quick Nodes +![image](https://user-images.githubusercontent.com/125205205/230174266-5232831a-a03b-4bf7-bc8b-c45466a0bc64.png) +Adds various menu items to some nodes for quickly setting up common parts of graphs + +## Play Sound +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/9bcf9fb3-5898-4432-a974-fb1e17d3b7e8) +Plays a sound when the node is executed, either after each prompt or only when the queue is empty (useful when queuing multiple prompts). +You can customize the sound by replacing the mp3 file `web/extensions/pysssss/CustomScripts/assets/notify.mp3` + +## System Notification +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/30354775/993fd783-5cd6-4779-aa97-173bc06cc405) +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/30354775/e45227fb-5714-4f45-b96b-6601902ef6e2) + +Sends a system notification via the browser when the node is executed, either after each prompt or only when the queue is empty (useful when queuing multiple prompts). + +## [WIP] Repeater +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ec0dac25-14e4-4d44-b975-52193656709d) +Node allows you to either create a list of N repeats of the input node, or create N outputs from the input node. +You can optionally decide if you want to reuse the input node, or create a new instance each time (e.g. a Checkpoint Loader would want to be re-used, but a random number would want to be unique) +TODO: Type safety on the wildcard outputs to require match with input + +## Show Text +![image](https://user-images.githubusercontent.com/125205205/230174888-c004fd48-da78-4de9-81c2-93a866fcfcd1.png) +Takes input from a node that produces a string and displays it; useful for things like interrogators, prompt generators, etc. + +## Show Image on Menu +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/b6ab58f2-583b-448c-bcfc-f93f5cdab0fc) +Shows the current generating image on the menu at the bottom; you can disable this via the settings menu. + +## String Function +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/01107137-8a93-4765-bae0-fcc110a09091) +Supports appending and replacing text. +`tidy_tags` will add commas between parts when in `append` mode. +`replace` mode supports regex replace by using `/your regex here/`, and you can reference capturing groups using `\number`, e.g. `\1`.
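+The `/pattern/` find syntax maps onto a standard regex substitution; a hypothetical Python equivalent of `replace` mode (the node itself performs the substitution for you):
+
+```python
+import re
+
+# replace mode with find = /(\d+)girl/ and replace = \1 girl
+# behaves like this re.sub call:
+text = "1girl, solo, highres"
+print(re.sub(r"(\d+)girl", r"\1 girl", text))  # 1 girl, solo, highres
+```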
+ +## Touch Support +Provides basic support for touch screen devices; it's not perfect, but better than nothing + +## Widget Defaults +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/3d675032-2b19-4da8-a7d7-fa2d7c555daa) +Allows you to specify default values for widgets when adding new nodes; the values are configured via the settings menu +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7b57a3d8-98d3-46e9-9b33-6645c0da41e7) + +## Workflows +Adds options to the menu for saving + loading workflows: +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7b5a3012-4c59-47c6-8eea-85cf534403ea) + +## Workflow Images +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/06453fd2-c020-46ee-a7db-2b8bf5bcba7e) +Adds menu options for importing/exporting the graph as SVG and PNG showing a view of the nodes + +## (Testing) Reroute Primitive +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/8b870eef-d572-43f9-b394-cfa7abbd2f98) Provides a node that allows rerouting primitives. +The node can also be collapsed to a single point that you can drag around. +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/a9bd0112-cf8f-44f3-af6d-f9a8fed152a7) +Warning: Don't use normal reroutes or primitives with these nodes; it isn't tested, and this node replaces their functionality. +
+
+ + +## WD14 Tagger +Moved to: https://github.com/pythongosssss/ComfyUI-WD14-Tagger + +## Link Render Mode +![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ad3be76b-43b1-455e-a64a-bf2a6571facf) +Allows you to control how the links between nodes are rendered, choosing between straight, linear & spline (Straight is shown above). +
+
+ + +# Changelog + +## 2023-09-22 +### Minor +- ✨ Use Civitai image as preview +- 🐛 CTRL+Enter on autocomplete will no longer accept the suggestions as it is the shortcut for queuing a prompt. +- 🐛 Fix using numbers in widget defaults +- ✨ Support setting node properties (e.g. title, colors) via widget defaults + +## 2023-09-13 +### New +- ✨ Ability to "send" an image to a Load Image node in either the current or a different workflow +### Minor +- ✨ Add support for A1111 autocomplete CSV format +- ✨ Allow setting custom node for middle click to add node + +## 2023-09-10 +### Minor +- 🐛 Fix rendering new lines in workflow image exports + +## 2023-09-08 +### New +- ✨ Add Load + Save Text file nodes; you can configure the allowed directories in the `user/text_file_dirs.json` file +### Minor +- 🎨 Show autocomplete alias word on popup +- ✨ Add setting to disable middle click from adding a reroute node +- 🎨 Add prompt for setting custom column count on image feed (click the column count label) + +## 2023-09-07 +### New +- ✨ Support Unicode (e.g. Chinese) and word aliases in autocomplete. + +## 2023-09-05 +### Minor +- 🎨 Disable autocomplete on math node +- 🐛 Fix Show Text node always resizing on update +- 🎨 Better adding of preview image to menu (thanks to @zeroeightysix) +- 🎨 UX improvements for image feed (thanks to @birdddev) +- 🐛 Fix Math Expression expression not showing on updated ComfyUI + +## 2023-08-30 +### Minor +- 🎨 Allow jpeg lora/checkpoint preview images +- ✨ Save ShowText value to embedded image metadata + +## 2023-08-29 +### Minor +- ✨ Option to auto insert `, ` after autocomplete +- 🎨 Exclude arrow keys from triggering autocomplete +- 🐛 Split paths by `\` and `/` on Windows for submenus + +## 2023-08-28 +### New +- ✨ Add custom autocomplete word list setting +- ✨ Support autocomplete word priority sorting +- ✨ Support autocomplete matching anywhere in word rather than requiring starts with + +## 2023-08-27 +### New +- ✨ Add Checkpoint info +- ✨ Add embedding autocomplete +- ✨ Add embedding info +### Major +- ♻️ Refactor LoRA info + +## 2023-08-26 +### Minor +- 🐛 Fix using text widget values in Math Expression not casting to number +- 🎨 Fix padding on lightbox next arrow + +## 2023-08-25 +### Minor +- ♻️ Support older versions of python + +## 2023-08-24 +### Minor +- 🐛 Fix extracting links from LoRA info notes + +## 2023-08-23 +### Major +- 🚨 Update to use `WEB_DIRECTORY` feature instead of manual linking/copying web files + +## 2023-08-22 +### New +- ✨ Math Expression now supports IMAGE and LATENT inputs; to access the dimensions use `a.width`, `b.height` +- 🎨 Removed STRING output on Math Expression; it now draws the result onto the node + +## 2023-08-21 +### New +- ✨ Allow custom note (named {file}.txt) to show in LoRA info +- ✨ Query Civitai API using the model hash to provide link + +## 2023-08-20 +### New +- ✨ Add LoRA Info menu option for displaying LoRA metadata +### Minor +- 🐛 Fix crash on preset text replacement (thanks to @sjuxax) + +## 2023-08-19 +### New +- ✨ Add support for importing JPG files with embedded metadata (e.g.
from Civitai) +### Minor +- 🐛 Fix crash on graph arrange where LiteGraph sometimes stores links to deleted nodes +- 🐛 Fix a couple of rendering issues in workflow export + +## 2023-08-18 +### New +- ✨ Add "example" widget to custom LoRA + Checkpoint loader allowing you to quickly view saved prompts, triggers, etc. +- ✨ Add quick "Save as Preview" option on images to save generated images for models + +## 2023-08-16 +### New +- ✨ Add repeater node for generating lists or quickly duplicating nodes +### Minor +- 🐛 Support quick Add LoRA on custom Checkpoint Loader +- ✨ Support `randomint(min,max)` function in math node +- 🎨 Use relative imports to support proxied urls not on root path (thanks to @mcmonkey4eva) + +## 2023-08-13 +### Minor +- ✨ Support `round` `floor` `ceil` functions in math node +- 🐛 Fix floor division in math node + +## 2023-08-12 +### New +- 🎨 Image feed now uses a lightbox for showing images +### Minor +- 🎨 Better loader lists now supports images named `{name}.preview.png` + +## 2023-08-11 +### Minor +- ✨ Enable filter box on submenus + +## 2023-08-05 +### Major +- 🚨 The ComfyUI Lora Loader no longer has subfolders; due to compatibility issues you need to use my Lora Loader if you want subfolders. These can be enabled/disabled on the node via a setting (🐍 Enable submenu in custom nodes) +### New +- ✨ Add custom Checkpoint Loader supporting images & subfolders +- ✨ Add Play Sound node for notifying when a prompt is finished +### Minor +- ✨ Quick Nodes supports new LoRA loader ("Add 🐍 LoRA") +- ♻️ Disable link render mode if ComfyUI has native support + +## 2023-08-04 +### Minor +- ✨ Always snap to grid now applies on node resize +- 🐛 Fix reroute primitive widget value not being restored on reload +- ✨ Workflows now reuse last filename from load & save (save must be done by the submenu) + +## 2023-08-02 +### New +- ✨ Add "Always snap to grid" setting that does the same as holding shift, aligning nodes to the grid +### Minor +- 🚨 No longer populates image feed when it's closed +- 🐛 Allow lock/unlock of multiple selected nodes + +## 2023-08-01 +### Minor +- 🎨 Image feed now uses comfy theme variables for colors +- 🐛 Link render mode redraws graph on change of setting instead of requiring mouse move + +## 2023-07-30 +- 🎨 Update to image feed to make it more user friendly: change image size to column count, various other tweaks (thanks @DrJKL) + +## 2023-07-30 +### Major +- 🐛 Fix issue with context menu (right click) not working for some users after Lora script updates +### New +- ✨ Add "Custom" option to color menu for nodes & groups +### Minor +- 🐛 Fix String Function values converted to unconnected inputs outputting the text "undefined" + +## 2023-07-29 +### New +- ✨ Added Reroute Primitive combining the functionality of reroutes + primitives, also allowing collapsing to a single point.
+- ✨ Add support for exporting workflow images as PNGs and optional embedding of metadata in PNG and SVG +### Minor +- ✨ Remove new lines in Math Expression node +- ✨ String function is now an output node +- 🐛 Fix conflict between Lora Loader + Lora submenu causing the context menu to behave strangely (#23, #24) +- 🎨 Rename "SVG -> Import/Export" to "Workflow Image" -> Import/Export + +## 2023-07-27 +### New +- ✨ Added custom Lora Loader that includes image previews +### Minor +- ✨ Add preview output to string function node +- 📄 Updated missing/out of date parts of readme +- 🐛 Fix crash on show image on menu when set to not show (thanks @DrJKL) +- 🐛 Fix incorrect category (util vs utils) for math node (thanks @DrJKL) + +## 2023-07-27 +### Minor +- ✨ Save Image Feed close state +- 🐛 Fix unlocked group size calculation + +## 2023-07-21 + 22 +### Minor +- 🐛 Fix preset text incompatibility with Impact Pack (thanks @ltdrdata) + +## 2023-07-13 +### New +- ✨ Add Math Expression node for evaluating expressions using values from the graph +### Minor +- ✨ Add settings for image feed location + image order + +## 2023-06-27 +### Minor +- 🐛 Fix unlocking group using incorrect size +- ✨ Save visibility of image feed + +## 2023-06-18 +### Major Changes +- ✨ Added auto installation of scripts and `__init__` (thanks @TashaSkyUp) +- ♻️ Reworked folder structure +- 🚨 Renamed a number of nodes to include `pysssss` to prevent name conflicts +- 🚨 Remove Latent Upscale By as it is now a built in node in ComfyUI +- 🚨 Removed Anime Segmentation to own repo +### New +- ✨ Add Link Render Mode setting to choose how links are rendered +- ✨ Add Constrain Image node for resizing nodes to a min/max resolution with optional cropping +- ✨ Add Show Image On Menu to include the latest image output on the menu +- ✨ Add KSamplerAdvanced simple denoise prompt for configuring the node using steps + denoise +- 🎨 Add sizing options to Image Feed + +### Other +- ♻️ Include [canvas2svg](https://gliffy.github.io/canvas2svg/) for SVG export in assets to prevent downloading at runtime +- 🎨 Add background color (using theme color) to exported SVG +- 🐛 Fix Manage Widget Defaults to work with new ComfyUI settings dialog +- 🐛 Increase Image Feed z-index to prevent node text overlapping diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__init__.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..24c51da162278344957ce314e6a209b308e7360d --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__init__.py @@ -0,0 +1,25 @@ +import importlib.util +import glob +import os +import sys +from .pysssss import init, get_ext_dir + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + +if init(): + py = get_ext_dir("py") + files = glob.glob(os.path.join(py, "*.py"), recursive=False) + for file in files: + name = os.path.splitext(file)[0] + spec = importlib.util.spec_from_file_location(name, file) + module = importlib.util.module_from_spec(spec) + sys.modules[name] = module + spec.loader.exec_module(module) + if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None: + NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS) + if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None: + NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) + +WEB_DIRECTORY = "./web" +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] diff
--git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e49d2c41f0ac5c9a9ae7dc0a34f8b578e73a1e5 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bd412c4dfaa1a3600571f0bd52fe7b1abe9e599 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e608916eba3d3956c0fa071df809f99851ecdec Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d37e7a7a7fb95ecb35c62efd8dbdefc71076f5f Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7bef1deeaf70a47c0fe1cf53d3930bafa9c25a4 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image_for_video.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image_for_video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65b0786b33831e18f3f737f888b1159375b87ca5 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image_for_video.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6380c081be623006e22e3fd8f461260d08bc3b74 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f67fc4c3e52a8034a3cd0a0d398a817e78d9d525 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc 
b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95d7dd431296426baa375269d3e9616ba1cb877 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1da2007b0030d85833537dcd703ca5632e6eacb Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baa0c506b952afa27f363176c078b53877a139f7 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173bee3a67a169ad21f9b8471dfdefe874ab1776 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..827c9a5c4c72bc1e8fea5083a42cdc329c6b545a Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/system_notification.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/system_notification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cdd1a1ade49f243d3fcec768878dd8ab8155bfd Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/system_notification.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4240052356a614acf1df5818e8ea73cdf0723b Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4db0570dab92f5305bff8c8b38f354f516cdab53 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py new file mode 100644 index 0000000000000000000000000000000000000000..9955ff2990aff13181582653af1e503508cef55a 
--- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py @@ -0,0 +1,29 @@ +from server import PromptServer +from aiohttp import web +import os +import folder_paths + +dir = os.path.abspath(os.path.join(__file__, "../../user")) +if not os.path.exists(dir): + os.mkdir(dir) +file = os.path.join(dir, "autocomplete.txt") + + +@PromptServer.instance.routes.get("/pysssss/autocomplete") +async def get_autocomplete(request): + if os.path.isfile(file): + return web.FileResponse(file) + return web.Response(status=404) + + +@PromptServer.instance.routes.post("/pysssss/autocomplete") +async def update_autocomplete(request): + with open(file, "w", encoding="utf-8") as f: + f.write(await request.text()) + return web.Response(status=200) + + +@PromptServer.instance.routes.get("/pysssss/loras") +async def get_loras(request): + loras = folder_paths.get_filename_list("loras") + return web.json_response(list(map(lambda a: os.path.splitext(a)[0], loras))) diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py new file mode 100644 index 0000000000000000000000000000000000000000..88a6656da95c0fddb1b7fd224b0cdeff69410536 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py @@ -0,0 +1,196 @@ +import glob +import os +from nodes import LoraLoader, CheckpointLoaderSimple +import folder_paths +from server import PromptServer +from folder_paths import get_directory_by_type +from aiohttp import web +import shutil + + +@PromptServer.instance.routes.get("/pysssss/view/{name}") +async def view(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + image_path = folder_paths.get_full_path( + type, name) + if not image_path: + return web.Response(status=404) + + filename = os.path.basename(image_path) + return web.FileResponse(image_path, headers={"Content-Disposition": f"filename=\"{filename}\""}) + + +@PromptServer.instance.routes.post("/pysssss/save/{name}") +async def save_preview(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + body = await request.json() + + dir = get_directory_by_type(body.get("type", "output")) + subfolder = body.get("subfolder", "") + full_output_folder = os.path.join(dir, os.path.normpath(subfolder)) + + if os.path.commonpath((dir, os.path.abspath(full_output_folder))) != dir: + return web.Response(status=400) + + filepath = os.path.join(full_output_folder, body.get("filename", "")) + image_path = folder_paths.get_full_path(type, name) + image_path = os.path.splitext( + image_path)[0] + os.path.splitext(filepath)[1] + + shutil.copyfile(filepath, image_path) + + return web.json_response({ + "image": type + "/" + os.path.basename(image_path) + }) + + +@PromptServer.instance.routes.get("/pysssss/examples/{name}") +async def get_examples(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + file_path = folder_paths.get_full_path( + type, name) + if not file_path: + return web.Response(status=404) + + file_path_no_ext = os.path.splitext(file_path)[0] + examples = [] + + if os.path.isdir(file_path_no_ext): + examples += sorted(map(lambda t: os.path.relpath(t, file_path_no_ext), + glob.glob(file_path_no_ext + "/*.txt"))) + + if os.path.isfile(file_path_no_ext + ".txt"): + examples += ["notes"] + + return web.json_response(examples) + 
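+# NB: get_examples (above) lists the example files for a model: the *.txt files in a
+# folder named after the model, plus "notes" when a sibling {model}.txt exists.
+# save_example (below) writes a new example into that folder, creating it on first save.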
+@PromptServer.instance.routes.post("/pysssss/examples/{name}") +async def save_example(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + body = await request.json() + example_name = body["name"] + example = body["example"] + + file_path = folder_paths.get_full_path( + type, name) + if not file_path: + return web.Response(status=404) + + if not example_name.endswith(".txt"): + example_name += ".txt" + + file_path_no_ext = os.path.splitext(file_path)[0] + example_file = os.path.join(file_path_no_ext, example_name) + if not os.path.exists(file_path_no_ext): + os.mkdir(file_path_no_ext) + with open(example_file, 'w', encoding='utf8') as f: + f.write(example) + + return web.Response(status=201) + + +def populate_items(names, type): + for idx, item_name in enumerate(names): + + file_name = os.path.splitext(item_name)[0] + file_path = folder_paths.get_full_path(type, item_name) + + if file_path is None: + print(f"(pysssss:better_combos) Unable to get path for {type} {item_name}") + continue + + file_path_no_ext = os.path.splitext(file_path)[0] + + for ext in ["png", "jpg", "jpeg", "preview.png", "preview.jpeg"]: + has_image = os.path.isfile(file_path_no_ext + "." + ext) + if has_image: + item_image = f"{file_name}.{ext}" + break + + names[idx] = { + "content": item_name, + "image": f"{type}/{item_image}" if has_image else None, + } + names.sort(key=lambda i: i["content"].lower()) + + +class LoraLoaderWithImages(LoraLoader): + RETURN_TYPES = (*LoraLoader.RETURN_TYPES, "STRING",) + + @classmethod + def INPUT_TYPES(s): + types = super().INPUT_TYPES() + names = types["required"]["lora_name"][0] + populate_items(names, "loras") + types["optional"] = { "prompt": ("HIDDEN",) } + return types + + @classmethod + def VALIDATE_INPUTS(s, lora_name): + types = super().INPUT_TYPES() + names = types["required"]["lora_name"][0] + + name = lora_name["content"] + if name in names: + return True + else: + return f"Lora not found: {name}" + + def load_lora(self, **kwargs): + kwargs["lora_name"] = kwargs["lora_name"]["content"] + prompt = kwargs.pop("prompt", "") + return (*super().load_lora(**kwargs), prompt) + + +class CheckpointLoaderSimpleWithImages(CheckpointLoaderSimple): + RETURN_TYPES = (*CheckpointLoaderSimple.RETURN_TYPES, "STRING",) + + @classmethod + def INPUT_TYPES(s): + types = super().INPUT_TYPES() + names = types["required"]["ckpt_name"][0] + populate_items(names, "checkpoints") + types["optional"] = { "prompt": ("HIDDEN",) } + return types + + @classmethod + def VALIDATE_INPUTS(s, ckpt_name): + types = super().INPUT_TYPES() + names = types["required"]["ckpt_name"][0] + + name = ckpt_name["content"] + if name in names: + return True + else: + return f"Checkpoint not found: {name}" + + def load_checkpoint(self, **kwargs): + kwargs["ckpt_name"] = kwargs["ckpt_name"]["content"] + prompt = kwargs.pop("prompt", "") + return (*super().load_checkpoint(**kwargs), prompt) + + +NODE_CLASS_MAPPINGS = { + "LoraLoader|pysssss": LoraLoaderWithImages, + "CheckpointLoader|pysssss": CheckpointLoaderSimpleWithImages, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LoraLoader|pysssss": "Lora Loader 🐍", + "CheckpointLoader|pysssss": "Checkpoint Loader 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py new file mode 100644 index 0000000000000000000000000000000000000000..04b0c31cdaf8555b57fbd9ae9b56319418ebc470 --- /dev/null +++ 
b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py @@ -0,0 +1,71 @@ +import torch +import numpy as np +from PIL import Image + +class ConstrainImage: + """ + A node that constrains an image to a maximum and minimum size while maintaining aspect ratio. + """ + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "max_width": ("INT", {"default": 1024, "min": 0}), + "max_height": ("INT", {"default": 1024, "min": 0}), + "min_width": ("INT", {"default": 0, "min": 0}), + "min_height": ("INT", {"default": 0, "min": 0}), + "crop_if_required": (["yes", "no"], {"default": "no"}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "constrain_image" + CATEGORY = "image" + OUTPUT_IS_LIST = (True,) + + def constrain_image(self, images, max_width, max_height, min_width, min_height, crop_if_required): + crop_if_required = crop_if_required == "yes" + results = [] + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)).convert("RGB") + + current_width, current_height = img.size + aspect_ratio = current_width / current_height + + constrained_width = max(min(current_width, min_width), max_width) + constrained_height = max(min(current_height, min_height), max_height) + + if constrained_width / constrained_height > aspect_ratio: + constrained_width = max(int(constrained_height * aspect_ratio), min_width) + if crop_if_required: + constrained_height = int(current_height / (current_width / constrained_width)) + else: + constrained_height = max(int(constrained_width / aspect_ratio), min_height) + if crop_if_required: + constrained_width = int(current_width / (current_height / constrained_height)) + + resized_image = img.resize((constrained_width, constrained_height), Image.LANCZOS) + + if crop_if_required and (constrained_width > max_width or constrained_height > max_height): + left = max((constrained_width - max_width) // 2, 0) + top = max((constrained_height - max_height) // 2, 0) + right = min(constrained_width, max_width) + left + bottom = min(constrained_height, max_height) + top + resized_image = resized_image.crop((left, top, right, bottom)) + + resized_image = np.array(resized_image).astype(np.float32) / 255.0 + resized_image = torch.from_numpy(resized_image)[None,] + results.append(resized_image) + + return (results,) + +NODE_CLASS_MAPPINGS = { + "ConstrainImage|pysssss": ConstrainImage, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ConstrainImage|pysssss": "Constrain Image 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image_for_video.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image_for_video.py new file mode 100644 index 0000000000000000000000000000000000000000..81cf1eb52664eb37aa29f4a5901395b1023364d2 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image_for_video.py @@ -0,0 +1,72 @@ +import torch +import numpy as np +from PIL import Image + +class ConstrainImageforVideo: + """ + A node that constrains an image to a maximum and minimum size while maintaining aspect ratio. 
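+ Unlike the plain Constrain Image node, the constrained frames are concatenated into a single batched IMAGE tensor, which suits video pipelines.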
+ """ + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "max_width": ("INT", {"default": 1024, "min": 0}), + "max_height": ("INT", {"default": 1024, "min": 0}), + "min_width": ("INT", {"default": 0, "min": 0}), + "min_height": ("INT", {"default": 0, "min": 0}), + "crop_if_required": (["yes", "no"], {"default": "no"}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("IMAGE",) + FUNCTION = "constrain_image_for_video" + CATEGORY = "image" + + def constrain_image_for_video(self, images, max_width, max_height, min_width, min_height, crop_if_required): + crop_if_required = crop_if_required == "yes" + results = [] + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)).convert("RGB") + + current_width, current_height = img.size + aspect_ratio = current_width / current_height + + constrained_width = max(min(current_width, min_width), max_width) + constrained_height = max(min(current_height, min_height), max_height) + + if constrained_width / constrained_height > aspect_ratio: + constrained_width = max(int(constrained_height * aspect_ratio), min_width) + if crop_if_required: + constrained_height = int(current_height / (current_width / constrained_width)) + else: + constrained_height = max(int(constrained_width / aspect_ratio), min_height) + if crop_if_required: + constrained_width = int(current_width / (current_height / constrained_height)) + + resized_image = img.resize((constrained_width, constrained_height), Image.LANCZOS) + + if crop_if_required and (constrained_width > max_width or constrained_height > max_height): + left = max((constrained_width - max_width) // 2, 0) + top = max((constrained_height - max_height) // 2, 0) + right = min(constrained_width, max_width) + left + bottom = min(constrained_height, max_height) + top + resized_image = resized_image.crop((left, top, right, bottom)) + + resized_image = np.array(resized_image).astype(np.float32) / 255.0 + resized_image = torch.from_numpy(resized_image)[None,] + results.append(resized_image) + all_images = torch.cat(results, dim=0) + + return (all_images, all_images.size(0),) + +NODE_CLASS_MAPPINGS = { + "ConstrainImageforVideo|pysssss": ConstrainImageforVideo, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ConstrainImageforVideo|pysssss": "Constrain Image for Video 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py new file mode 100644 index 0000000000000000000000000000000000000000..013a2ba94cc14c339bb3bdd0ccc39c60ea5862c8 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py @@ -0,0 +1,252 @@ +import ast +import math +import random +import operator as op + +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + + +# Our any instance wants to be a wildcard string +any = AnyType("*") + +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Pow: op.pow, + ast.BitXor: op.xor, + ast.USub: op.neg, + ast.Mod: op.mod, + ast.BitAnd: op.and_, + ast.BitOr: op.or_, + ast.Invert: op.invert, + ast.And: lambda a, b: 1 if a and b else 0, + ast.Or: lambda a, b: 1 if a or b else 0, + ast.Not: lambda a: 0 if a else 1, + ast.RShift: op.rshift, + ast.LShift: op.lshift +} + +# TODO: restructure args to provide more info, generate hint based on args to save 
duplication +functions = { + "round": { + "args": (1, 2), + "call": lambda a, b = None: round(a, b), + "hint": "number, dp? = 0" + }, + "ceil": { + "args": (1, 1), + "call": lambda a: math.ceil(a), + "hint": "number" + }, + "floor": { + "args": (1, 1), + "call": lambda a: math.floor(a), + "hint": "number" + }, + "min": { + "args": (2, None), + "call": lambda *args: min(*args), + "hint": "...numbers" + }, + "max": { + "args": (2, None), + "call": lambda *args: max(*args), + "hint": "...numbers" + }, + "randomint": { + "args": (2, 2), + "call": lambda a, b: random.randint(a, b), + "hint": "min, max" + }, + "randomchoice": { + "args": (2, None), + "call": lambda *args: random.choice(args), + "hint": "...numbers" + }, + "sqrt": { + "args": (1, 1), + "call": lambda a: math.sqrt(a), + "hint": "number" + }, + "int": { + "args": (1, 1), + "call": lambda a = None: int(a), + "hint": "number" + }, + "iif": { + "args": (3, 3), + "call": lambda a, b, c = None: b if a else c, + "hint": "value, truepart, falsepart" + }, +} + +autocompleteWords = list({ + "text": x, + "value": f"{x}()", + "showValue": False, + "hint": f"{functions[x]['hint']}", + "caretOffset": -1 +} for x in functions.keys()) + + +class MathExpression: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "expression": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": { + "words": autocompleteWords, + "separator": "" + }}), + }, + "optional": { + "a": (any, ), + "b": (any,), + "c": (any, ), + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", + "prompt": "PROMPT"}, + } + + RETURN_TYPES = ("INT", "FLOAT", ) + FUNCTION = "evaluate" + CATEGORY = "utils" + OUTPUT_NODE = True + + @classmethod + def IS_CHANGED(s, expression, **kwargs): + if "random" in expression: + return float("nan") + return expression + + def get_widget_value(self, extra_pnginfo, prompt, node_name, widget_name): + workflow = extra_pnginfo["workflow"] if "workflow" in extra_pnginfo else { "nodes": [] } + node_id = None + for node in workflow["nodes"]: + name = node["type"] + if "properties" in node: + if "Node name for S&R" in node["properties"]: + name = node["properties"]["Node name for S&R"] + if name == node_name: + node_id = node["id"] + break + if "title" in node: + name = node["title"] + if name == node_name: + node_id = node["id"] + break + if node_id is not None: + values = prompt[str(node_id)] + if "inputs" in values: + if widget_name in values["inputs"]: + value = values["inputs"][widget_name] + if isinstance(value, list): + raise ValueError("Converted widgets are not supported via named reference, use the inputs instead.") + return value + raise NameError(f"Widget not found: {node_name}.{widget_name}") + raise NameError(f"Node not found: {node_name}.{widget_name}") + + def get_size(self, target, property): + if isinstance(target, dict) and "samples" in target: + # Latent + if property == "width": + return target["samples"].shape[3] * 8 + return target["samples"].shape[2] * 8 + else: + # Image + if property == "width": + return target.shape[2] + return target.shape[1] + + def evaluate(self, expression, prompt, extra_pnginfo={}, a=None, b=None, c=None): + expression = expression.replace('\n', ' ').replace('\r', '') + node = ast.parse(expression, mode='eval').body + + lookup = {"a": a, "b": b, "c": c} + + def eval_op(node, l, r): + l = eval_expr(l) + r = eval_expr(r) + l = l if isinstance(l, int) else float(l) + r = r if isinstance(r, int) else float(r) + return operators[type(node.op)](l, r) + + def eval_expr(node): + 
+            if isinstance(node, ast.Constant) or isinstance(node, ast.Num):
+                return node.n
+            elif isinstance(node, ast.BinOp):
+                return eval_op(node, node.left, node.right)
+            elif isinstance(node, ast.BoolOp):
+                return eval_op(node, node.values[0], node.values[1])
+            elif isinstance(node, ast.UnaryOp):
+                return operators[type(node.op)](eval_expr(node.operand))
+            elif isinstance(node, ast.Attribute):
+                if node.value.id in lookup:
+                    if node.attr == "width" or node.attr == "height":
+                        return self.get_size(lookup[node.value.id], node.attr)
+
+                return self.get_widget_value(extra_pnginfo, prompt, node.value.id, node.attr)
+            elif isinstance(node, ast.Name):
+                if node.id in lookup:
+                    val = lookup[node.id]
+                    if isinstance(val, (int, float, complex)):
+                        return val
+                    else:
+                        raise TypeError(
+                            f"Complex types (LATENT/IMAGE) need to reference their width/height, e.g. {node.id}.width")
+                raise NameError(f"Name not found: {node.id}")
+            elif isinstance(node, ast.Call):
+                if node.func.id in functions:
+                    fn = functions[node.func.id]
+                    l = len(node.args)
+                    if l < fn["args"][0] or (fn["args"][1] is not None and l > fn["args"][1]):
+                        if fn["args"][1] is None:
+                            toErr = " or more"
+                        else:
+                            toErr = f" to {fn['args'][1]}"
+                        raise SyntaxError(
+                            f"Invalid function call: {node.func.id} requires {fn['args'][0]}{toErr} arguments")
+                    args = []
+                    for arg in node.args:
+                        args.append(eval_expr(arg))
+                    return fn["call"](*args)
+                raise NameError(f"Invalid function call: {node.func.id}")
+            elif isinstance(node, ast.Compare):
+                l = eval_expr(node.left)
+                r = eval_expr(node.comparators[0])
+                if isinstance(node.ops[0], ast.Eq):
+                    return 1 if l == r else 0
+                if isinstance(node.ops[0], ast.NotEq):
+                    return 1 if l != r else 0
+                if isinstance(node.ops[0], ast.Gt):
+                    return 1 if l > r else 0
+                if isinstance(node.ops[0], ast.GtE):
+                    return 1 if l >= r else 0
+                if isinstance(node.ops[0], ast.Lt):
+                    return 1 if l < r else 0
+                if isinstance(node.ops[0], ast.LtE):
+                    return 1 if l <= r else 0
+                raise NotImplementedError(
+                    "Operator " + node.ops[0].__class__.__name__ + " not supported.")
+            else:
+                raise TypeError(node)
+
+        r = eval_expr(node)
+        return {"ui": {"value": [r]}, "result": (int(r), float(r),)}
+
+
+NODE_CLASS_MAPPINGS = {
+    "MathExpression|pysssss": MathExpression,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "MathExpression|pysssss": "Math Expression 🐍",
+}
+
diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f3a176483cdb143ad92503c4d33aa18c9b56899
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py
@@ -0,0 +1,115 @@
+import hashlib
+import json
+from aiohttp import web
+from server import PromptServer
+import folder_paths
+import os
+
+
+def get_metadata(filepath):
+    with open(filepath, "rb") as file:
+        # https://github.com/huggingface/safetensors#format
+        # 8 bytes: N, an unsigned little-endian 64-bit integer, containing the size of the header
+        header_size = int.from_bytes(file.read(8), "little", signed=False)
+
+        if header_size <= 0:
+            raise BufferError("Invalid header size")
+
+        header = file.read(header_size)
+        if len(header) < header_size:
+            raise BufferError("Invalid header")
+
+        header_json = json.loads(header)
+        return header_json["__metadata__"] if "__metadata__" in header_json else None
+
+
+@PromptServer.instance.routes.post("/pysssss/metadata/notes/{name}")
+async def save_notes(request):
+    name = request.match_info["name"]
+    pos = name.index("/")
+    type = 
name[0:pos] + name = name[pos+1:] + + file_path = None + if type == "embeddings" or type == "loras": + name = name.lower() + files = folder_paths.get_filename_list(type) + for f in files: + lower_f = f.lower() + if lower_f == name: + file_path = folder_paths.get_full_path(type, f) + else: + n = os.path.splitext(f)[0].lower() + if n == name: + file_path = folder_paths.get_full_path(type, f) + + if file_path is not None: + break + else: + file_path = folder_paths.get_full_path( + type, name) + if not file_path: + return web.Response(status=404) + + file_no_ext = os.path.splitext(file_path)[0] + info_file = file_no_ext + ".txt" + with open(info_file, "w") as f: + f.write(await request.text()) + + return web.Response(status=200) + + +@PromptServer.instance.routes.get("/pysssss/metadata/{name}") +async def load_metadata(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + file_path = None + if type == "embeddings" or type == "loras": + name = name.lower() + files = folder_paths.get_filename_list(type) + for f in files: + lower_f = f.lower() + if lower_f == name: + file_path = folder_paths.get_full_path(type, f) + else: + n = os.path.splitext(f)[0].lower() + if n == name: + file_path = folder_paths.get_full_path(type, f) + + if file_path is not None: + break + else: + file_path = folder_paths.get_full_path( + type, name) + if not file_path: + return web.Response(status=404) + + try: + meta = get_metadata(file_path) + except: + meta = None + + if meta is None: + meta = {} + + file_no_ext = os.path.splitext(file_path)[0] + + info_file = file_no_ext + ".txt" + if os.path.isfile(info_file): + with open(info_file, "r") as f: + meta["pysssss.notes"] = f.read() + + hash_file = file_no_ext + ".sha256" + if os.path.isfile(hash_file): + with open(hash_file, "rt") as f: + meta["pysssss.sha256"] = f.read() + else: + with open(file_path, "rb") as f: + meta["pysssss.sha256"] = hashlib.sha256(f.read()).hexdigest() + with open(hash_file, "wt") as f: + f.write(meta["pysssss.sha256"]) + + return web.json_response(meta) diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py new file mode 100644 index 0000000000000000000000000000000000000000..02b0c21f1a63a2ea971aa1f4373b3e508b46eb1c --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py @@ -0,0 +1,42 @@ +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + + +# Our any instance wants to be a wildcard string +any = AnyType("*") + + +class PlaySound: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "any": (any, {}), + "mode": (["always", "on empty queue"], {}), + "volume": ("FLOAT", {"min": 0, "max": 1, "step": 0.1, "default": 0.5}), + "file": ("STRING", { "default": "notify.mp3" }) + }} + + FUNCTION = "nop" + INPUT_IS_LIST = True + OUTPUT_IS_LIST = (True,) + OUTPUT_NODE = True + RETURN_TYPES = (any,) + + CATEGORY = "utils" + + def IS_CHANGED(self, **kwargs): + return float("NaN") + + def nop(self, any, mode, volume, file): + return {"ui": {"a": []}, "result": (any,)} + + +NODE_CLASS_MAPPINGS = { + "PlaySound|pysssss": PlaySound, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PlaySound|pysssss": "PlaySound 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py new file mode 100644 index 
0000000000000000000000000000000000000000..425a7f9b74890c8e56b4cf1e0ba777680ced7540 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py @@ -0,0 +1,46 @@ +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + + +# Our any instance wants to be a wildcard string +any = AnyType("*") + + +class Repeater: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "source": (any, {}), + "repeats": ("INT", {"min": 0, "max": 5000, "default": 2}), + "output": (["single", "multi"], {}), + "node_mode": (["reuse", "create"], {}), + }} + + RETURN_TYPES = (any,) + FUNCTION = "repeat" + OUTPUT_NODE = False + OUTPUT_IS_LIST = (True,) + + CATEGORY = "utils" + + def repeat(self, repeats, output, node_mode, **kwargs): + if output == "multi": + # Multi outputs are split to indiviual nodes on the frontend when serializing + return ([kwargs["source"]],) + elif node_mode == "reuse": + # When reusing we have a single input node, repeat that N times + return ([kwargs["source"]] * repeats,) + else: + # When creating new nodes, they'll be added dynamically when the graph is serialized + return ((list(kwargs.values())),) + + +NODE_CLASS_MAPPINGS = { + "Repeater|pysssss": Repeater, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "Repeater|pysssss": "Repeater 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py new file mode 100644 index 0000000000000000000000000000000000000000..d5a640c7e20e868e93f02180f0f98f588646d195 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py @@ -0,0 +1,59 @@ +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + + +# Our any instance wants to be a wildcard string +any = AnyType("*") + + +class ReroutePrimitive: + @classmethod + def INPUT_TYPES(cls): + return { + "required": {"value": (any, )}, + } + + @classmethod + def VALIDATE_INPUTS(s, **kwargs): + return True + + RETURN_TYPES = (any,) + FUNCTION = "route" + CATEGORY = "__hidden__" + + def route(self, value): + return (value,) + + +class MultiPrimitive: + @classmethod + def INPUT_TYPES(cls): + return { + "required": {}, + "optional": {"value": (any, )}, + } + + @classmethod + def VALIDATE_INPUTS(s, **kwargs): + return True + + RETURN_TYPES = (any,) + FUNCTION = "listify" + CATEGORY = "utils" + OUTPUT_IS_LIST = (True,) + + def listify(self, **kwargs): + return (list(kwargs.values()),) + + +NODE_CLASS_MAPPINGS = { + "ReroutePrimitive|pysssss": ReroutePrimitive, + # "MultiPrimitive|pysssss": MultiPrimitive, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ReroutePrimitive|pysssss": "Reroute Primitive 🐍", + # "MultiPrimitive|pysssss": "Multi Primitive 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py new file mode 100644 index 0000000000000000000000000000000000000000..1359cf0ce5587d37f7e0251af0eb7a8cba1e3e98 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py @@ -0,0 +1,49 @@ +class ShowText: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", {"forceInput": True}), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + "extra_pnginfo": "EXTRA_PNGINFO", + }, + } + + INPUT_IS_LIST = True + RETURN_TYPES = ("STRING",) + FUNCTION = "notify" + OUTPUT_NODE = True + 
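+    # INPUT_IS_LIST: `text` arrives wrapped in a list (one entry per batched
+    # value); OUTPUT_IS_LIST marks the matching output as a list as well.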
OUTPUT_IS_LIST = (True,) + + CATEGORY = "utils" + + def notify(self, text, unique_id=None, extra_pnginfo=None): + if unique_id is not None and extra_pnginfo is not None: + if not isinstance(extra_pnginfo, list): + print("Error: extra_pnginfo is not a list") + elif ( + not isinstance(extra_pnginfo[0], dict) + or "workflow" not in extra_pnginfo[0] + ): + print("Error: extra_pnginfo[0] is not a dict or missing 'workflow' key") + else: + workflow = extra_pnginfo[0]["workflow"] + node = next( + (x for x in workflow["nodes"] if str(x["id"]) == str(unique_id[0])), + None, + ) + if node: + node["widgets_values"] = [text] + + return {"ui": {"text": text}, "result": (text,)} + + +NODE_CLASS_MAPPINGS = { + "ShowText|pysssss": ShowText, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ShowText|pysssss": "Show Text 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py new file mode 100644 index 0000000000000000000000000000000000000000..08c8356480942ee36a8fb9b132bf1ba33cbc9cee --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py @@ -0,0 +1,49 @@ +import re + +class StringFunction: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "action": (["append", "replace"], {}), + "tidy_tags": (["yes", "no"], {}), + }, + "optional": { + "text_a": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "text_b": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "text_c": ("STRING", {"multiline": True, "dynamicPrompts": False}) + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "exec" + CATEGORY = "utils" + OUTPUT_NODE = True + + def exec(self, action, tidy_tags, text_a="", text_b="", text_c=""): + tidy_tags = tidy_tags == "yes" + out = "" + if action == "append": + out = (", " if tidy_tags else "").join(filter(None, [text_a, text_b, text_c])) + else: + if text_c is None: + text_c = "" + if text_b.startswith("/") and text_b.endswith("/"): + regex = text_b[1:-1] + out = re.sub(regex, text_c, text_a) + else: + out = text_a.replace(text_b, text_c) + if tidy_tags: + out = re.sub(r"\s{2,}", " ", out) + out = out.replace(" ,", ",") + out = re.sub(r",{2,}", ",", out) + out = out.strip() + return {"ui": {"text": (out,)}, "result": (out,)} + +NODE_CLASS_MAPPINGS = { + "StringFunction|pysssss": StringFunction, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "StringFunction|pysssss": "String Function 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/system_notification.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/system_notification.py new file mode 100644 index 0000000000000000000000000000000000000000..258c84347e80eec3449f19be59a4fdf90b18884d --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/system_notification.py @@ -0,0 +1,41 @@ +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + + +# Our any instance wants to be a wildcard string +any = AnyType("*") + + +class SystemNotification: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "message": ("STRING", {"default": "Your notification has triggered."}), + "any": (any, {}), + "mode": (["always", "on empty queue"], {}), + }} + + FUNCTION = "nop" + INPUT_IS_LIST = True + OUTPUT_IS_LIST = (True,) + OUTPUT_NODE = True + RETURN_TYPES = (any,) + + CATEGORY = "utils" + + def IS_CHANGED(self, **kwargs): + return float("NaN") + + def nop(self, any, **kwargs): + return {"ui": {"a": []}, 
"result": (any,)} + + +NODE_CLASS_MAPPINGS = { + "SystemNotification|pysssss": SystemNotification, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "SystemNotification|pysssss": "SystemNotification 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9fc99c7898901d8a3234482e3390c2f93785ea --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py @@ -0,0 +1,200 @@ +import os +import folder_paths +import json +from server import PromptServer +import glob +from aiohttp import web + + +def get_allowed_dirs(): + dir = os.path.abspath(os.path.join(__file__, "../../user")) + file = os.path.join(dir, "text_file_dirs.json") + with open(file, "r") as f: + return json.loads(f.read()) + + +def get_valid_dirs(): + return get_allowed_dirs().keys() + + +def get_dir_from_name(name): + dirs = get_allowed_dirs() + if name not in dirs: + raise KeyError(name + " dir not found") + + path = dirs[name] + path = path.replace("$input", folder_paths.get_input_directory()) + path = path.replace("$output", folder_paths.get_output_directory()) + path = path.replace("$temp", folder_paths.get_temp_directory()) + return path + + +def is_child_dir(parent_path, child_path): + parent_path = os.path.abspath(parent_path) + child_path = os.path.abspath(child_path) + return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path]) + + +def get_real_path(dir): + dir = dir.replace("/**/", "/") + dir = os.path.abspath(dir) + dir = os.path.split(dir)[0] + return dir + + +@PromptServer.instance.routes.get("/pysssss/text-file/{name}") +async def get_files(request): + name = request.match_info["name"] + dir = get_dir_from_name(name) + recursive = "/**/" in dir + # Ugh cant use root_path on glob... lazy hack.. 
+ pre = get_real_path(dir) + + files = list(map(lambda t: os.path.relpath(t, pre), + glob.glob(dir, recursive=recursive))) + + if len(files) == 0: + files = ["[none]"] + return web.json_response(files) + + +def get_file(root_dir, file): + if file == "[none]" or not file or not file.strip(): + raise ValueError("No file") + + root_dir = get_dir_from_name(root_dir) + root_dir = get_real_path(root_dir) + if not os.path.exists(root_dir): + os.mkdir(root_dir) + full_path = os.path.join(root_dir, file) + + if not is_child_dir(root_dir, full_path): + raise ReferenceError() + + return full_path + + +class TextFileNode: + RETURN_TYPES = ("STRING",) + CATEGORY = "utils" + + @classmethod + def VALIDATE_INPUTS(self, root_dir, file, **kwargs): + if file == "[none]" or not file or not file.strip(): + return True + get_file(root_dir, file) + return True + + def load_text(self, **kwargs): + self.file = get_file(kwargs["root_dir"], kwargs["file"]) + with open(self.file, "r") as f: + return (f.read(), ) + + +class LoadText(TextFileNode): + @classmethod + def IS_CHANGED(self, **kwargs): + return os.path.getmtime(self.file) + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "root_dir": (list(get_valid_dirs()), {}), + "file": (["[none]"], { + "pysssss.binding": [{ + "source": "root_dir", + "callback": [{ + "type": "set", + "target": "$this.disabled", + "value": True + }, { + "type": "fetch", + "url": "/pysssss/text-file/{$source.value}", + "then": [{ + "type": "set", + "target": "$this.options.values", + "value": "$result" + }, { + "type": "validate-combo" + }, { + "type": "set", + "target": "$this.disabled", + "value": False + }] + }], + }] + }) + }, + } + + FUNCTION = "load_text" + + +class SaveText(TextFileNode): + OUTPUT_NODE = True + + @classmethod + def IS_CHANGED(self, **kwargs): + return float("nan") + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "root_dir": (list(get_valid_dirs()), {}), + "file": ("STRING", {"default": "file.txt"}), + "append": (["append", "overwrite", "new only"], {}), + "insert": ("BOOLEAN", { + "default": True, "label_on": "new line", "label_off": "none", + "pysssss.binding": [{ + "source": "append", + "callback": [{ + "type": "if", + "condition": [{ + "left": "$source.value", + "op": "eq", + "right": '"append"' + }], + "true": [{ + "type": "set", + "target": "$this.disabled", + "value": False + }], + "false": [{ + "type": "set", + "target": "$this.disabled", + "value": True + }], + }] + }] + }), + "text": ("STRING", {"forceInput": True, "multiline": True}) + }, + } + + FUNCTION = "write_text" + + def write_text(self, **kwargs): + self.file = get_file(kwargs["root_dir"], kwargs["file"]) + if kwargs["append"] == "new only" and os.path.exists(self.file): + raise FileExistsError( + self.file + " already exists and 'new only' is selected.") + with open(self.file, "a+" if kwargs["append"] == "append" else "w") as f: + is_append = f.tell() != 0 + if is_append and kwargs["insert"]: + f.write("\n") + f.write(kwargs["text"]) + + return super().load_text(**kwargs) + + +NODE_CLASS_MAPPINGS = { + "LoadText|pysssss": LoadText, + "SaveText|pysssss": SaveText, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LoadText|pysssss": "Load Text 🐍", + "SaveText|pysssss": "Save Text 🐍", +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py new file mode 100644 index 0000000000000000000000000000000000000000..02c2262c739c1a7a18b6d2abed1b3046c3a380da --- /dev/null +++ 
b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py @@ -0,0 +1,59 @@ +from server import PromptServer +from aiohttp import web +import os +import inspect +import json +import importlib +import sys +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) +import pysssss + +root_directory = os.path.dirname(inspect.getfile(PromptServer)) +workflows_directory = os.path.join(root_directory, "pysssss-workflows") +workflows_directory = pysssss.get_config_value( + "workflows.directory", workflows_directory) + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + + +@PromptServer.instance.routes.get("/pysssss/workflows") +async def get_workflows(request): + files = [] + for dirpath, directories, file in os.walk(workflows_directory): + for file in file: + if (file.endswith(".json")): + files.append(os.path.relpath(os.path.join( + dirpath, file), workflows_directory)) + return web.json_response(list(map(lambda f: os.path.splitext(f)[0].replace("\\", "/"), files))) + + +@PromptServer.instance.routes.get("/pysssss/workflows/{name:.+}") +async def get_workflow(request): + file = os.path.abspath(os.path.join( + workflows_directory, request.match_info["name"] + ".json")) + if os.path.commonpath([file, workflows_directory]) != workflows_directory: + return web.Response(status=403) + + return web.FileResponse(file) + + +@PromptServer.instance.routes.post("/pysssss/workflows") +async def save_workflow(request): + json_data = await request.json() + file = os.path.abspath(os.path.join( + workflows_directory, json_data["name"] + ".json")) + if os.path.commonpath([file, workflows_directory]) != workflows_directory: + return web.Response(status=403) + + if os.path.exists(file) and ("overwrite" not in json_data or json_data["overwrite"] == False): + return web.Response(status=409) + + sub_path = os.path.dirname(file) + if not os.path.exists(sub_path): + os.makedirs(sub_path) + + with open(file, "w") as f: + f.write(json.dumps(json_data["workflow"])) + + return web.Response(status=201) diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pyproject.toml b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..a1c952d2369b1d28b5d48ebe478bf5c9c61aa354 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "comfyui-custom-scripts" +description = "Enhancements & experiments for ComfyUI, mostly focusing on UI features" +version = "1.0.0" +license = { file = "LICENSE" } + +[project.urls] +Repository = "https://github.com/pythongosssss/ComfyUI-Custom-Scripts" + +[tool.comfy] +PublisherId = "pythongosssss" +DisplayName = "ComfyUI-Custom-Scripts" +Icon = "" diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.default.json b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.default.json new file mode 100644 index 0000000000000000000000000000000000000000..5e610aa088f089ecd379e1211b4202be26b5eefe --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.default.json @@ -0,0 +1,4 @@ +{ + "name": "CustomScripts", + "logging": false +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json new file mode 100644 index 0000000000000000000000000000000000000000..34cf62eb6ec3a5b6a74243d0800c368fe985322f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json @@ -0,0 +1,7 @@ +{ + "name": "CustomScripts", + "logging": false, + 
"workflows": { + "directory": "C:\\ComfyUI-Workflows" + } +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json new file mode 100644 index 0000000000000000000000000000000000000000..5e610aa088f089ecd379e1211b4202be26b5eefe --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json @@ -0,0 +1,4 @@ +{ + "name": "CustomScripts", + "logging": false +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py new file mode 100644 index 0000000000000000000000000000000000000000..da044bfa3b86a8c1d27aec2089e14b90fce2285a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py @@ -0,0 +1,300 @@ +import asyncio +import os +import json +import shutil +import inspect +import aiohttp +from server import PromptServer +from tqdm import tqdm + +config = None + + +def is_logging_enabled(): + config = get_extension_config() + if "logging" not in config: + return False + return config["logging"] + + +def log(message, type=None, always=False, name=None): + if not always and not is_logging_enabled(): + return + + if type is not None: + message = f"[{type}] {message}" + + if name is None: + name = get_extension_config()["name"] + + print(f"(pysssss:{name}) {message}") + + +def get_ext_dir(subpath=None, mkdir=False): + dir = os.path.dirname(__file__) + if subpath is not None: + dir = os.path.join(dir, subpath) + + dir = os.path.abspath(dir) + + if mkdir and not os.path.exists(dir): + os.makedirs(dir) + return dir + + +def get_comfy_dir(subpath=None, mkdir=False): + dir = os.path.dirname(inspect.getfile(PromptServer)) + if subpath is not None: + dir = os.path.join(dir, subpath) + + dir = os.path.abspath(dir) + + if mkdir and not os.path.exists(dir): + os.makedirs(dir) + return dir + + +def get_web_ext_dir(): + config = get_extension_config() + name = config["name"] + dir = get_comfy_dir("web/extensions/pysssss") + if not os.path.exists(dir): + os.makedirs(dir) + dir = os.path.join(dir, name) + return dir + + +def get_extension_config(reload=False): + global config + if reload == False and config is not None: + return config + + config_path = get_ext_dir("pysssss.json") + default_config_path = get_ext_dir("pysssss.default.json") + if not os.path.exists(config_path): + if os.path.exists(default_config_path): + shutil.copy(default_config_path, config_path) + if not os.path.exists(config_path): + log(f"Failed to create config at {config_path}", type="ERROR", always=True, name="???") + print(f"Extension path: {get_ext_dir()}") + return {"name": "Unknown", "version": -1} + + else: + log("Missing pysssss.default.json, this extension may not work correctly. 
Please reinstall the extension.", + type="ERROR", always=True, name="???") + print(f"Extension path: {get_ext_dir()}") + return {"name": "Unknown", "version": -1} + + with open(config_path, "r") as f: + config = json.loads(f.read()) + return config + + +def link_js(src, dst): + src = os.path.abspath(src) + dst = os.path.abspath(dst) + if os.name == "nt": + try: + import _winapi + _winapi.CreateJunction(src, dst) + return True + except: + pass + try: + os.symlink(src, dst) + return True + except: + import logging + logging.exception('') + return False + + +def is_junction(path): + if os.name != "nt": + return False + try: + return bool(os.readlink(path)) + except OSError: + return False + + +def install_js(): + src_dir = get_ext_dir("web/js") + if not os.path.exists(src_dir): + log("No JS") + return + + should_install = should_install_js() + if should_install: + log("it looks like you're running an old version of ComfyUI that requires manual setup of web files, it is recommended you update your installation.", "warning", True) + dst_dir = get_web_ext_dir() + linked = os.path.islink(dst_dir) or is_junction(dst_dir) + if linked or os.path.exists(dst_dir): + if linked: + if should_install: + log("JS already linked") + else: + os.unlink(dst_dir) + log("JS unlinked, PromptServer will serve extension") + elif not should_install: + shutil.rmtree(dst_dir) + log("JS deleted, PromptServer will serve extension") + return + + if not should_install: + log("JS skipped, PromptServer will serve extension") + return + + if link_js(src_dir, dst_dir): + log("JS linked") + return + + log("Copying JS files") + shutil.copytree(src_dir, dst_dir, dirs_exist_ok=True) + + +def should_install_js(): + return not hasattr(PromptServer.instance, "supports") or "custom_nodes_from_web" not in PromptServer.instance.supports + + +def init(check_imports=None): + log("Init") + + if check_imports is not None: + import importlib.util + for imp in check_imports: + spec = importlib.util.find_spec(imp) + if spec is None: + log(f"{imp} is required, please check requirements are installed.", + type="ERROR", always=True) + return False + + install_js() + return True + + +def get_async_loop(): + loop = None + try: + loop = asyncio.get_event_loop() + except: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return loop + + +def get_http_session(): + loop = get_async_loop() + return aiohttp.ClientSession(loop=loop) + + +async def download(url, stream, update_callback=None, session=None): + close_session = False + if session is None: + close_session = True + session = get_http_session() + try: + async with session.get(url) as response: + size = int(response.headers.get('content-length', 0)) or None + + with tqdm( + unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1], total=size, + ) as progressbar: + perc = 0 + async for chunk in response.content.iter_chunked(2048): + stream.write(chunk) + progressbar.update(len(chunk)) + if update_callback is not None and progressbar.total is not None and progressbar.total != 0: + last = perc + perc = round(progressbar.n / progressbar.total, 2) + if perc != last: + last = perc + await update_callback(perc) + finally: + if close_session and session is not None: + await session.close() + + +async def download_to_file(url, destination, update_callback=None, is_ext_subpath=True, session=None): + if is_ext_subpath: + destination = get_ext_dir(destination) + with open(destination, mode='wb') as f: + download(url, f, update_callback, session) + + +def wait_for_async(async_fn, 
loop=None): + res = [] + + async def run_async(): + r = await async_fn() + res.append(r) + + if loop is None: + try: + loop = asyncio.get_event_loop() + except: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + loop.run_until_complete(run_async()) + + return res[0] + + +def update_node_status(client_id, node, text, progress=None): + if client_id is None: + client_id = PromptServer.instance.client_id + + if client_id is None: + return + + PromptServer.instance.send_sync("pysssss/update_status", { + "node": node, + "progress": progress, + "text": text + }, client_id) + + +async def update_node_status_async(client_id, node, text, progress=None): + if client_id is None: + client_id = PromptServer.instance.client_id + + if client_id is None: + return + + await PromptServer.instance.send("pysssss/update_status", { + "node": node, + "progress": progress, + "text": text + }, client_id) + + +def get_config_value(key, default=None, throw=False): + split = key.split(".") + obj = get_extension_config() + for s in split: + if s in obj: + obj = obj[s] + else: + if throw: + raise KeyError("Configuration key missing: " + key) + else: + return default + return obj + + +def is_inside_dir(root_dir, check_path): + root_dir = os.path.abspath(root_dir) + if not os.path.isabs(check_path): + check_path = os.path.abspath(os.path.join(root_dir, check_path)) + return os.path.commonpath([check_path, root_dir]) == root_dir + + +def get_child_dir(root_dir, child_path, throw_if_outside=True): + child_path = os.path.abspath(os.path.join(root_dir, child_path)) + if is_inside_dir(root_dir, child_path): + return child_path + if throw_if_outside: + raise NotADirectoryError( + "Saving outside the target folder is not allowed.") + return None diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json new file mode 100644 index 0000000000000000000000000000000000000000..49270e58301660ca43887e2268e144d2d67979bb --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json @@ -0,0 +1,5 @@ +{ + "input": "$input/**/*.txt", + "output": "$output/**/*.txt", + "temp": "$temp/**/*.txt" +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js new file mode 100644 index 0000000000000000000000000000000000000000..72df742f61c0b5feee69db7e3a8387d157836700 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js @@ -0,0 +1,1192 @@ +/*!! + * Canvas 2 Svg v1.0.19 + * A low level canvas to SVG converter. Uses a mock canvas context to build an SVG document. + * + * Licensed under the MIT license: + * http://www.opensource.org/licenses/mit-license.php + * + * Author: + * Kerry Liu + * + * Copyright (c) 2014 Gliffy Inc. + */ + +;(function() { + "use strict"; + + var STYLES, ctx, CanvasGradient, CanvasPattern, namedEntities; + + //helper function to format a string + function format(str, args) { + var keys = Object.keys(args), i; + for (i=0; i 1) { + options = defaultOptions; + options.width = arguments[0]; + options.height = arguments[1]; + } else if( !o ) { + options = defaultOptions; + } else { + options = o; + } + + if(!(this instanceof ctx)) { + //did someone call this without new? 
+ return new ctx(options); + } + + //setup options + this.width = options.width || defaultOptions.width; + this.height = options.height || defaultOptions.height; + this.enableMirroring = options.enableMirroring !== undefined ? options.enableMirroring : defaultOptions.enableMirroring; + + this.canvas = this; ///point back to this instance! + this.__document = options.document || document; + this.__canvas = this.__document.createElement("canvas"); + this.__ctx = this.__canvas.getContext("2d"); + + this.__setDefaultStyles(); + this.__stack = [this.__getStyleState()]; + this.__groupStack = []; + + //the root svg element + this.__root = this.__document.createElementNS("http://www.w3.org/2000/svg", "svg"); + this.__root.setAttribute("version", 1.1); + this.__root.setAttribute("xmlns", "http://www.w3.org/2000/svg"); + this.__root.setAttributeNS("http://www.w3.org/2000/xmlns/", "xmlns:xlink", "http://www.w3.org/1999/xlink"); + this.__root.setAttribute("width", this.width); + this.__root.setAttribute("height", this.height); + + //make sure we don't generate the same ids in defs + this.__ids = {}; + + //defs tag + this.__defs = this.__document.createElementNS("http://www.w3.org/2000/svg", "defs"); + this.__root.appendChild(this.__defs); + + //also add a group child. the svg element can't use the transform attribute + this.__currentElement = this.__document.createElementNS("http://www.w3.org/2000/svg", "g"); + this.__root.appendChild(this.__currentElement); + }; + + + /** + * Creates the specified svg element + * @private + */ + ctx.prototype.__createElement = function (elementName, properties, resetFill) { + if (typeof properties === "undefined") { + properties = {}; + } + + var element = this.__document.createElementNS("http://www.w3.org/2000/svg", elementName), + keys = Object.keys(properties), i, key; + if(resetFill) { + //if fill or stroke is not specified, the svg element should not display. By default SVG's fill is black. + element.setAttribute("fill", "none"); + element.setAttribute("stroke", "none"); + } + for(i=0; i 0) { + var group = this.__createElement("g"); + parent.appendChild(group); + this.__currentElement = group; + } + + var transform = this.__currentElement.getAttribute("transform"); + if(transform) { + transform += " "; + } else { + transform = ""; + } + transform += t; + this.__currentElement.setAttribute("transform", transform); + }; + + /** + * scales the current element + */ + ctx.prototype.scale = function(x, y) { + if(y === undefined) { + y = x; + } + this.__addTransform(format("scale({x},{y})", {x:x, y:y})); + }; + + /** + * rotates the current element + */ + ctx.prototype.rotate = function(angle){ + var degrees = (angle * 180 / Math.PI); + this.__addTransform(format("rotate({angle},{cx},{cy})", {angle:degrees, cx:0, cy:0})); + }; + + /** + * translates the current element + */ + ctx.prototype.translate = function(x, y){ + this.__addTransform(format("translate({x},{y})", {x:x,y:y})); + }; + + /** + * applies a transform to the current element + */ + ctx.prototype.transform = function(a, b, c, d, e, f){ + this.__addTransform(format("matrix({a},{b},{c},{d},{e},{f})", {a:a, b:b, c:c, d:d, e:e, f:f})); + }; + + /** + * Create a new Path Element + */ + ctx.prototype.beginPath = function(){ + var path, parent; + + // Note that there is only one current default path, it is not part of the drawing state. 
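+        // (in particular, save()/restore() snapshot styles and transforms, not the path)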
+ // See also: https://html.spec.whatwg.org/multipage/scripting.html#current-default-path + this.__currentDefaultPath = ""; + this.__currentPosition = {}; + + path = this.__createElement("path", {}, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(path); + this.__currentElement = path; + }; + + /** + * Helper function to apply currentDefaultPath to current path element + * @private + */ + ctx.prototype.__applyCurrentDefaultPath = function() { + if(this.__currentElement.nodeName === "path") { + var d = this.__currentDefaultPath; + this.__currentElement.setAttribute("d", d); + } else { + throw new Error("Attempted to apply path command to node " + this.__currentElement.nodeName); + } + }; + + /** + * Helper function to add path command + * @private + */ + ctx.prototype.__addPathCommand = function(command){ + this.__currentDefaultPath += " "; + this.__currentDefaultPath += command; + }; + + /** + * Adds the move command to the current path element, + * if the currentPathElement is not empty create a new path element + */ + ctx.prototype.moveTo = function(x,y){ + if(this.__currentElement.nodeName !== "path") { + this.beginPath(); + } + + // creates a new subpath with the given point + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("M {x} {y}", {x:x, y:y})); + }; + + /** + * Closes the current path + */ + ctx.prototype.closePath = function(){ + this.__addPathCommand("Z"); + }; + + /** + * Adds a line to command + */ + ctx.prototype.lineTo = function(x, y){ + this.__currentPosition = {x: x, y: y}; + if (this.__currentDefaultPath.indexOf('M') > -1) { + this.__addPathCommand(format("L {x} {y}", {x:x, y:y})); + } else { + this.__addPathCommand(format("M {x} {y}", {x:x, y:y})); + } + }; + + /** + * Add a bezier command + */ + ctx.prototype.bezierCurveTo = function(cp1x, cp1y, cp2x, cp2y, x, y) { + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("C {cp1x} {cp1y} {cp2x} {cp2y} {x} {y}", + {cp1x:cp1x, cp1y:cp1y, cp2x:cp2x, cp2y:cp2y, x:x, y:y})); + }; + + /** + * Adds a quadratic curve to command + */ + ctx.prototype.quadraticCurveTo = function(cpx, cpy, x, y){ + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("Q {cpx} {cpy} {x} {y}", {cpx:cpx, cpy:cpy, x:x, y:y})); + }; + + + /** + * Return a new normalized vector of given vector + */ + var normalize = function(vector) { + var len = Math.sqrt(vector[0] * vector[0] + vector[1] * vector[1]); + return [vector[0] / len, vector[1] / len]; + }; + + /** + * Adds the arcTo to the current path + * + * @see http://www.w3.org/TR/2015/WD-2dcontext-20150514/#dom-context-2d-arcto + */ + ctx.prototype.arcTo = function(x1, y1, x2, y2, radius) { + // Let the point (x0, y0) be the last point in the subpath. + var x0 = this.__currentPosition && this.__currentPosition.x; + var y0 = this.__currentPosition && this.__currentPosition.y; + + // First ensure there is a subpath for (x1, y1). + if (typeof x0 == "undefined" || typeof y0 == "undefined") { + return; + } + + // Negative values for radius must cause the implementation to throw an IndexSizeError exception. + if (radius < 0) { + throw new Error("IndexSizeError: The radius provided (" + radius + ") is negative."); + } + + // If the point (x0, y0) is equal to the point (x1, y1), + // or if the point (x1, y1) is equal to the point (x2, y2), + // or if the radius radius is zero, + // then the method must add the point (x1, y1) to the subpath, + // and connect that point to the previous point (x0, y0) by a straight line. 
+ if (((x0 === x1) && (y0 === y1)) + || ((x1 === x2) && (y1 === y2)) + || (radius === 0)) { + this.lineTo(x1, y1); + return; + } + + // Otherwise, if the points (x0, y0), (x1, y1), and (x2, y2) all lie on a single straight line, + // then the method must add the point (x1, y1) to the subpath, + // and connect that point to the previous point (x0, y0) by a straight line. + var unit_vec_p1_p0 = normalize([x0 - x1, y0 - y1]); + var unit_vec_p1_p2 = normalize([x2 - x1, y2 - y1]); + if (unit_vec_p1_p0[0] * unit_vec_p1_p2[1] === unit_vec_p1_p0[1] * unit_vec_p1_p2[0]) { + this.lineTo(x1, y1); + return; + } + + // Otherwise, let The Arc be the shortest arc given by circumference of the circle that has radius radius, + // and that has one point tangent to the half-infinite line that crosses the point (x0, y0) and ends at the point (x1, y1), + // and that has a different point tangent to the half-infinite line that ends at the point (x1, y1), and crosses the point (x2, y2). + // The points at which this circle touches these two lines are called the start and end tangent points respectively. + + // note that both vectors are unit vectors, so the length is 1 + var cos = (unit_vec_p1_p0[0] * unit_vec_p1_p2[0] + unit_vec_p1_p0[1] * unit_vec_p1_p2[1]); + var theta = Math.acos(Math.abs(cos)); + + // Calculate origin + var unit_vec_p1_origin = normalize([ + unit_vec_p1_p0[0] + unit_vec_p1_p2[0], + unit_vec_p1_p0[1] + unit_vec_p1_p2[1] + ]); + var len_p1_origin = radius / Math.sin(theta / 2); + var x = x1 + len_p1_origin * unit_vec_p1_origin[0]; + var y = y1 + len_p1_origin * unit_vec_p1_origin[1]; + + // Calculate start angle and end angle + // rotate 90deg clockwise (note that y axis points to its down) + var unit_vec_origin_start_tangent = [ + -unit_vec_p1_p0[1], + unit_vec_p1_p0[0] + ]; + // rotate 90deg counter clockwise (note that y axis points to its down) + var unit_vec_origin_end_tangent = [ + unit_vec_p1_p2[1], + -unit_vec_p1_p2[0] + ]; + var getAngle = function(vector) { + // get angle (clockwise) between vector and (1, 0) + var x = vector[0]; + var y = vector[1]; + if (y >= 0) { // note that y axis points to its down + return Math.acos(x); + } else { + return -Math.acos(x); + } + }; + var startAngle = getAngle(unit_vec_origin_start_tangent); + var endAngle = getAngle(unit_vec_origin_end_tangent); + + // Connect the point (x0, y0) to the start tangent point by a straight line + this.lineTo(x + unit_vec_origin_start_tangent[0] * radius, + y + unit_vec_origin_start_tangent[1] * radius); + + // Connect the start tangent point to the end tangent point by arc + // and adding the end tangent point to the subpath. + this.arc(x, y, radius, startAngle, endAngle); + }; + + /** + * Sets the stroke property on the current element + */ + ctx.prototype.stroke = function(){ + if(this.__currentElement.nodeName === "path") { + this.__currentElement.setAttribute("paint-order", "fill stroke markers"); + } + this.__applyCurrentDefaultPath(); + this.__applyStyleToCurrentElement("stroke"); + }; + + /** + * Sets fill properties on the current element + */ + ctx.prototype.fill = function(){ + if(this.__currentElement.nodeName === "path") { + this.__currentElement.setAttribute("paint-order", "stroke fill markers"); + } + this.__applyCurrentDefaultPath(); + this.__applyStyleToCurrentElement("fill"); + }; + + /** + * Adds a rectangle to the path. 
+ */ + ctx.prototype.rect = function(x, y, width, height){ + if(this.__currentElement.nodeName !== "path") { + this.beginPath(); + } + this.moveTo(x, y); + this.lineTo(x+width, y); + this.lineTo(x+width, y+height); + this.lineTo(x, y+height); + this.lineTo(x, y); + this.closePath(); + }; + + + /** + * adds a rectangle element + */ + ctx.prototype.fillRect = function(x, y, width, height){ + var rect, parent; + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height + }, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(rect); + this.__currentElement = rect; + this.__applyStyleToCurrentElement("fill"); + }; + + /** + * Draws a rectangle with no fill + * @param x + * @param y + * @param width + * @param height + */ + ctx.prototype.strokeRect = function(x, y, width, height){ + var rect, parent; + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height + }, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(rect); + this.__currentElement = rect; + this.__applyStyleToCurrentElement("stroke"); + }; + + + /** + * Clear entire canvas: + * 1. save current transforms + * 2. remove all the childNodes of the root g element + */ + ctx.prototype.__clearCanvas = function() { + var current = this.__closestGroupOrSvg(), + transform = current.getAttribute("transform"); + var rootGroup = this.__root.childNodes[1]; + var childNodes = rootGroup.childNodes; + for (var i = childNodes.length - 1; i >= 0; i--) { + if (childNodes[i]) { + rootGroup.removeChild(childNodes[i]); + } + } + this.__currentElement = rootGroup; + //reset __groupStack as all the child group nodes are all removed. + this.__groupStack = []; + if (transform) { + this.__addTransform(transform); + } + }; + + /** + * "Clears" a canvas by just drawing a white rectangle in the current group. + */ + ctx.prototype.clearRect = function(x, y, width, height) { + //clear entire canvas + if (x === 0 && y === 0 && width === this.width && height === this.height) { + this.__clearCanvas(); + return; + } + var rect, parent = this.__closestGroupOrSvg(); + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height, + fill : "#FFFFFF" + }, true); + parent.appendChild(rect); + }; + + /** + * Adds a linear gradient to a defs tag. + * Returns a canvas gradient object that has a reference to it's parent def + */ + ctx.prototype.createLinearGradient = function(x1, y1, x2, y2){ + var grad = this.__createElement("linearGradient", { + id : randomString(this.__ids), + x1 : x1+"px", + x2 : x2+"px", + y1 : y1+"px", + y2 : y2+"px", + "gradientUnits" : "userSpaceOnUse" + }, false); + this.__defs.appendChild(grad); + return new CanvasGradient(grad, this); + }; + + /** + * Adds a radial gradient to a defs tag. 
+ * Returns a canvas gradient object that has a reference to it's parent def + */ + ctx.prototype.createRadialGradient = function(x0, y0, r0, x1, y1, r1){ + var grad = this.__createElement("radialGradient", { + id : randomString(this.__ids), + cx : x1+"px", + cy : y1+"px", + r : r1+"px", + fx : x0+"px", + fy : y0+"px", + "gradientUnits" : "userSpaceOnUse" + }, false); + this.__defs.appendChild(grad); + return new CanvasGradient(grad, this); + + }; + + /** + * Parses the font string and returns svg mapping + * @private + */ + ctx.prototype.__parseFont = function() { + var regex = /^\s*(?=(?:(?:[-a-z]+\s*){0,2}(italic|oblique))?)(?=(?:(?:[-a-z]+\s*){0,2}(small-caps))?)(?=(?:(?:[-a-z]+\s*){0,2}(bold(?:er)?|lighter|[1-9]00))?)(?:(?:normal|\1|\2|\3)\s*){0,3}((?:xx?-)?(?:small|large)|medium|smaller|larger|[.\d]+(?:\%|in|[cem]m|ex|p[ctx]))(?:\s*\/\s*(normal|[.\d]+(?:\%|in|[cem]m|ex|p[ctx])))?\s*([-,\'\"\sa-z]+?)\s*$/i; + var fontPart = regex.exec( this.font ); + var data = { + style : fontPart[1] || 'normal', + size : fontPart[4] || '10px', + family : fontPart[6] || 'sans-serif', + weight: fontPart[3] || 'normal', + decoration : fontPart[2] || 'normal', + href : null + }; + + //canvas doesn't support underline natively, but we can pass this attribute + if(this.__fontUnderline === "underline") { + data.decoration = "underline"; + } + + //canvas also doesn't support linking, but we can pass this as well + if(this.__fontHref) { + data.href = this.__fontHref; + } + + return data; + }; + + /** + * Helper to link text fragments + * @param font + * @param element + * @return {*} + * @private + */ + ctx.prototype.__wrapTextLink = function(font, element) { + if(font.href) { + var a = this.__createElement("a"); + a.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", font.href); + a.appendChild(element); + return a; + } + return element; + }; + + /** + * Fills or strokes text + * @param text + * @param x + * @param y + * @param action - stroke or fill + * @private + */ + ctx.prototype.__applyText = function(text, x, y, action) { + var font = this.__parseFont(), + parent = this.__closestGroupOrSvg(), + textElement = this.__createElement("text", { + "font-family" : font.family, + "font-size" : font.size, + "font-style" : font.style, + "font-weight" : font.weight, + "text-decoration" : font.decoration, + "x" : x, + "y" : y, + "text-anchor": getTextAnchor(this.textAlign), + "dominant-baseline": getDominantBaseline(this.textBaseline) + }, true); + + textElement.appendChild(this.__document.createTextNode(text)); + this.__currentElement = textElement; + this.__applyStyleToCurrentElement(action); + parent.appendChild(this.__wrapTextLink(font,textElement)); + }; + + /** + * Creates a text element + * @param text + * @param x + * @param y + */ + ctx.prototype.fillText = function(text, x, y){ + this.__applyText(text, x, y, "fill"); + }; + + /** + * Strokes text + * @param text + * @param x + * @param y + */ + ctx.prototype.strokeText = function(text, x, y){ + this.__applyText(text, x, y, "stroke"); + }; + + /** + * No need to implement this for svg. + * @param text + * @return {TextMetrics} + */ + ctx.prototype.measureText = function(text){ + this.__ctx.font = this.font; + return this.__ctx.measureText(text); + }; + + /** + * Arc command! + */ + ctx.prototype.arc = function(x, y, radius, startAngle, endAngle, counterClockwise) { + // in canvas no circle is drawn if no angle is provided. 
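+        // (equal angles = a zero-length arc; true full circles are handled just
+        // below by nudging endAngle, as one SVG arc cannot span 360 degrees)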
+ if (startAngle === endAngle) { + return; + } + startAngle = startAngle % (2*Math.PI); + endAngle = endAngle % (2*Math.PI); + if(startAngle === endAngle) { + //circle time! subtract some of the angle so svg is happy (svg elliptical arc can't draw a full circle) + endAngle = ((endAngle + (2*Math.PI)) - 0.001 * (counterClockwise ? -1 : 1)) % (2*Math.PI); + } + var endX = x+radius*Math.cos(endAngle), + endY = y+radius*Math.sin(endAngle), + startX = x+radius*Math.cos(startAngle), + startY = y+radius*Math.sin(startAngle), + sweepFlag = counterClockwise ? 0 : 1, + largeArcFlag = 0, + diff = endAngle - startAngle; + + // https://github.com/gliffy/canvas2svg/issues/4 + if(diff < 0) { + diff += 2*Math.PI; + } + + if(counterClockwise) { + largeArcFlag = diff > Math.PI ? 0 : 1; + } else { + largeArcFlag = diff > Math.PI ? 1 : 0; + } + + this.lineTo(startX, startY); + this.__addPathCommand(format("A {rx} {ry} {xAxisRotation} {largeArcFlag} {sweepFlag} {endX} {endY}", + {rx:radius, ry:radius, xAxisRotation:0, largeArcFlag:largeArcFlag, sweepFlag:sweepFlag, endX:endX, endY:endY})); + + this.__currentPosition = {x: endX, y: endY}; + }; + + /** + * Generates a ClipPath from the clip command. + */ + ctx.prototype.clip = function(){ + var group = this.__closestGroupOrSvg(), + clipPath = this.__createElement("clipPath"), + id = randomString(this.__ids), + newGroup = this.__createElement("g"); + + this.__applyCurrentDefaultPath(); + group.removeChild(this.__currentElement); + clipPath.setAttribute("id", id); + clipPath.appendChild(this.__currentElement); + + this.__defs.appendChild(clipPath); + + //set the clip path to this group + group.setAttribute("clip-path", format("url(#{id})", {id:id})); + + //clip paths can be scaled and transformed, we need to add another wrapper group to avoid later transformations + // to this path + group.appendChild(newGroup); + + this.__currentElement = newGroup; + + }; + + /** + * Draws a canvas, image or mock context to this canvas. + * Note that all svg dom manipulation uses node.childNodes rather than node.children for IE support. + * http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#dom-context-2d-drawimage + */ + ctx.prototype.drawImage = function(){ + //convert arguments to a real array + var args = Array.prototype.slice.call(arguments), + image=args[0], + dx, dy, dw, dh, sx=0, sy=0, sw, sh, parent, svg, defs, group, + currentElement, svgImage, canvas, context, id; + + if(args.length === 3) { + dx = args[1]; + dy = args[2]; + sw = image.width; + sh = image.height; + dw = sw; + dh = sh; + } else if(args.length === 5) { + dx = args[1]; + dy = args[2]; + dw = args[3]; + dh = args[4]; + sw = image.width; + sh = image.height; + } else if(args.length === 9) { + sx = args[1]; + sy = args[2]; + sw = args[3]; + sh = args[4]; + dx = args[5]; + dy = args[6]; + dw = args[7]; + dh = args[8]; + } else { + throw new Error("Inavlid number of arguments passed to drawImage: " + arguments.length); + } + + parent = this.__closestGroupOrSvg(); + currentElement = this.__currentElement; + var translateDirective = "translate(" + dx + ", " + dy + ")"; + if(image instanceof ctx) { + //canvas2svg mock canvas context. In the future we may want to clone nodes instead. + //also I'm currently ignoring dw, dh, sw, sh, sx, sy for a mock context. 
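+            //clone the nested svg, merge its defs into ours (tracking their ids),
+            //then re-parent its drawing group with an extra translate(dx, dy)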
+ svg = image.getSvg().cloneNode(true); + if (svg.childNodes && svg.childNodes.length > 1) { + defs = svg.childNodes[0]; + while(defs.childNodes.length) { + id = defs.childNodes[0].getAttribute("id"); + this.__ids[id] = id; + this.__defs.appendChild(defs.childNodes[0]); + } + group = svg.childNodes[1]; + if (group) { + //save original transform + var originTransform = group.getAttribute("transform"); + var transformDirective; + if (originTransform) { + transformDirective = originTransform+" "+translateDirective; + } else { + transformDirective = translateDirective; + } + group.setAttribute("transform", transformDirective); + parent.appendChild(group); + } + } + } else if(image.nodeName === "CANVAS" || image.nodeName === "IMG") { + //canvas or image + svgImage = this.__createElement("image"); + svgImage.setAttribute("width", dw); + svgImage.setAttribute("height", dh); + svgImage.setAttribute("preserveAspectRatio", "none"); + + if(sx || sy || sw !== image.width || sh !== image.height) { + //crop the image using a temporary canvas + canvas = this.__document.createElement("canvas"); + canvas.width = dw; + canvas.height = dh; + context = canvas.getContext("2d"); + context.drawImage(image, sx, sy, sw, sh, 0, 0, dw, dh); + image = canvas; + } + svgImage.setAttribute("transform", translateDirective); + svgImage.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", + image.nodeName === "CANVAS" ? image.toDataURL() : image.getAttribute("src")); + parent.appendChild(svgImage); + } + }; + + /** + * Generates a pattern tag + */ + ctx.prototype.createPattern = function(image, repetition){ + var pattern = this.__document.createElementNS("http://www.w3.org/2000/svg", "pattern"), id = randomString(this.__ids), + img; + pattern.setAttribute("id", id); + pattern.setAttribute("width", image.width); + pattern.setAttribute("height", image.height); + if(image.nodeName === "CANVAS" || image.nodeName === "IMG") { + img = this.__document.createElementNS("http://www.w3.org/2000/svg", "image"); + img.setAttribute("width", image.width); + img.setAttribute("height", image.height); + img.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", + image.nodeName === "CANVAS" ? 
image.toDataURL() : image.getAttribute("src")); + pattern.appendChild(img); + this.__defs.appendChild(pattern); + } else if(image instanceof ctx) { + pattern.appendChild(image.__root.childNodes[1]); + this.__defs.appendChild(pattern); + } + return new CanvasPattern(pattern, this); + }; + + ctx.prototype.setLineDash = function(dashArray) { + if (dashArray && dashArray.length > 0) { + this.lineDash = dashArray.join(","); + } else { + this.lineDash = null; + } + }; + + /** + * Not yet implemented + */ + ctx.prototype.drawFocusRing = function(){}; + ctx.prototype.createImageData = function(){}; + ctx.prototype.getImageData = function(){}; + ctx.prototype.putImageData = function(){}; + ctx.prototype.globalCompositeOperation = function(){}; + ctx.prototype.setTransform = function(){}; + + //add options for alternative namespace + if (typeof window === "object") { + window.C2S = ctx; + } + + // CommonJS/Browserify + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = ctx; + } + +}()); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico new file mode 100644 index 0000000000000000000000000000000000000000..64045ab56e87879adb039b1fdb0bbbe0462143e7 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..08df2481551bb6903735fa69d658b5abfb0a5ae1 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5e3fdabbb12142dfa75702a0b0e0ca5e5425a7dd Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js new file mode 100644 index 0000000000000000000000000000000000000000..50168bf02fa2174a48ab99e2ed73c250be572257 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js @@ -0,0 +1,588 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; +import { api } from "../../../scripts/api.js"; +import { $el, ComfyDialog } from "../../../scripts/ui.js"; +import { TextAreaAutoComplete } from "./common/autocomplete.js"; +import { ModelInfoDialog } from "./common/modelInfoDialog.js"; +import { LoraInfoDialog } from "./modelInfo.js"; + +function parseCSV(csvText) { + const rows = []; + const delimiter = ","; + const quote = '"'; + let currentField = ""; + let inQuotedField = false; + + function pushField() { + rows[rows.length - 1].push(currentField); + currentField = ""; + inQuotedField = false; + } + + rows.push([]); // Initialize the first row + + for (let i = 0; i < csvText.length; i++) { + const char = csvText[i]; + const nextChar = csvText[i + 1]; + + // Special handling for backslash escaped quotes + if (char === "\\" && nextChar === quote) { + currentField += quote; + i++; + } + + if (!inQuotedField) { + if (char === quote) { + inQuotedField 
= true; + } else if (char === delimiter) { + pushField(); + } else if (char === "\r" || char === "\n" || i === csvText.length - 1) { + pushField(); + if (nextChar === "\n") { + i++; // Handle Windows line endings (\r\n) + } + rows.push([]); // Start a new row + } else { + currentField += char; + } + } else { + if (char === quote && nextChar === quote) { + currentField += quote; + i++; // Skip the next quote + } else if (char === quote) { + inQuotedField = false; + } else if (char === "\r" || char === "\n" || i === csvText.length - 1) { + // Dont allow new lines in quoted text, assume its wrong + const parsed = parseCSV(currentField); + rows.pop(); + rows.push(...parsed); + inQuotedField = false; + currentField = ""; + rows.push([]); + } else { + currentField += char; + } + } + } + + if (currentField || csvText[csvText.length - 1] === ",") { + pushField(); + } + + // Remove the last row if it's empty + if (rows[rows.length - 1].length === 0) { + rows.pop(); + } + + return rows; +} + +async function getCustomWords() { + const resp = await api.fetchApi("/pysssss/autocomplete", { cache: "no-store" }); + if (resp.status === 200) { + return await resp.text(); + } + return undefined; +} + +async function addCustomWords(text) { + if (!text) { + text = await getCustomWords(); + } + if (text) { + TextAreaAutoComplete.updateWords( + "pysssss.customwords", + parseCSV(text).reduce((p, n) => { + let text; + let priority; + let value; + let num; + switch (n.length) { + case 0: + return; + case 1: + // Single word + text = n[0]; + break; + case 2: + // Word,[priority|alias] + num = +n[1]; + if (isNaN(num)) { + text = n[0] + "🔄️" + n[1]; + value = n[0]; + } else { + text = n[0]; + priority = num; + } + break; + case 4: + // a1111 csv format? + value = n[0]; + priority = +n[2]; + const aliases = n[3]?.trim(); + if (aliases && aliases !== "null") { // Weird null in an example csv, maybe they are JSON.parsing the last column? 
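+							/*
+							 Illustrative rows and the word entries the surrounding reduce
+							 builds from them (tag names invented):
+
+							   cat                      -> { text: "cat" }
+							   cat,100                  -> { text: "cat", priority: 100 }
+							   cat,kitty                -> { text: "cat🔄️kitty", value: "cat" }
+							   cat,0,100,"kitty,feline" -> "cat" plus one entry per alias,
+							                               all with value "cat" and priority 100
+							*/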
+ const split = aliases.split(","); + for (const text of split) { + p[text] = { text, priority, value }; + } + } + text = value; + break; + default: + // Word,alias,priority + text = n[1]; + value = n[0]; + priority = +n[2]; + break; + } + p[text] = { text, priority, value }; + return p; + }, {}) + ); + } +} + +function toggleLoras() { + [TextAreaAutoComplete.globalWords, TextAreaAutoComplete.globalWordsExclLoras] = [ + TextAreaAutoComplete.globalWordsExclLoras, + TextAreaAutoComplete.globalWords, + ]; +} + +class EmbeddingInfoDialog extends ModelInfoDialog { + async addInfo() { + super.addInfo(); + const info = await this.addCivitaiInfo(); + if (info) { + $el("div", { + parent: this.content, + innerHTML: info.description, + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + } +} + +class CustomWordsDialog extends ComfyDialog { + async show() { + const text = await getCustomWords(); + this.words = $el("textarea", { + textContent: text, + style: { + width: "70vw", + height: "70vh", + }, + }); + + const input = $el("input", { + style: { + flex: "auto", + }, + value: + "https://gist.githubusercontent.com/pythongosssss/1d3efa6050356a08cea975183088159a/raw/a18fb2f94f9156cf4476b0c24a09544d6c0baec6/danbooru-tags.txt", + }); + + super.show( + $el( + "div", + { + style: { + display: "flex", + flexDirection: "column", + overflow: "hidden", + maxHeight: "100%", + }, + }, + [ + $el("h2", { + textContent: "Custom Autocomplete Words", + style: { + color: "#fff", + marginTop: 0, + textAlign: "center", + fontFamily: "sans-serif", + }, + }), + $el( + "div", + { + style: { + color: "#fff", + fontFamily: "sans-serif", + display: "flex", + alignItems: "center", + gap: "5px", + }, + }, + [ + $el("label", { textContent: "Load Custom List: " }), + input, + $el("button", { + textContent: "Load", + onclick: async () => { + try { + const res = await fetch(input.value); + if (res.status !== 200) { + throw new Error("Error loading: " + res.status + " " + res.statusText); + } + this.words.value = await res.text(); + } catch (error) { + alert("Error loading custom list, try manually copy + pasting the list"); + } + }, + }), + ] + ), + this.words, + ] + ) + ); + } + + createButtons() { + const btns = super.createButtons(); + const save = $el("button", { + type: "button", + textContent: "Save", + onclick: async (e) => { + try { + const res = await api.fetchApi("/pysssss/autocomplete", { method: "POST", body: this.words.value }); + if (res.status !== 200) { + throw new Error("Error saving: " + res.status + " " + res.statusText); + } + save.textContent = "Saved!"; + addCustomWords(this.words.value); + setTimeout(() => { + save.textContent = "Save"; + }, 500); + } catch (error) { + alert("Error saving word list!"); + console.error(error); + } + }, + }); + + btns.unshift(save); + return btns; + } +} + +const id = "pysssss.AutoCompleter"; + +app.registerExtension({ + name: id, + init() { + const STRING = ComfyWidgets.STRING; + const SKIP_WIDGETS = new Set(["ttN xyPlot.x_values", "ttN xyPlot.y_values"]); + ComfyWidgets.STRING = function (node, inputName, inputData) { + const r = STRING.apply(this, arguments); + + if (inputData[1]?.multiline) { + // Disabled on this input + const config = inputData[1]?.["pysssss.autocomplete"]; + if (config === false) return r; + + // In list of widgets to skip + const id = `${node.comfyClass}.${inputName}`; + if (SKIP_WIDGETS.has(id)) return r; + + let words; + let separator; + if (typeof config === "object") { + separator = config.separator; + words = {}; + if (config.words) 
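+				/*
+				 Sketch of the per-input config this branch consumes (node field and
+				 values invented; the keys are the ones read below):
+
+				   "text": ["STRING", { "multiline": true, "pysssss.autocomplete": {
+				     "separator": " ",
+				     "words": ["red hair", "blue eyes"],
+				     "groups": ["*"]            // "*" merges in every global word group
+				   }}]
+				*/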
{ + // Custom wordlist, this will have been registered on setup + Object.assign(words, TextAreaAutoComplete.groups[node.comfyClass + "." + inputName] ?? {}); + } + + for (const item of config.groups ?? []) { + if (item === "*") { + // This widget wants all global words included + Object.assign(words, TextAreaAutoComplete.globalWords); + } else { + // This widget wants a specific group included + Object.assign(words, TextAreaAutoComplete.groups[item] ?? {}); + } + } + } + + new TextAreaAutoComplete(r.widget.inputEl, words, separator); + } + + return r; + }; + + TextAreaAutoComplete.globalSeparator = localStorage.getItem(id + ".AutoSeparate") ?? ", "; + const enabledSetting = app.ui.settings.addSetting({ + id, + name: "🐍 Text Autocomplete", + defaultValue: true, + type: (name, setter, value) => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: name, + }), + ]), + $el("td", [ + $el( + "label", + { + textContent: "Enabled ", + style: { + display: "block", + }, + }, + [ + $el("input", { + id: id.replaceAll(".", "-"), + type: "checkbox", + checked: value, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.enabled = checked; + setter(checked); + }, + }), + ] + ), + $el( + "label.comfy-tooltip-indicator", + { + title: "This requires other ComfyUI nodes/extensions that support using LoRAs in the prompt.", + textContent: "Loras enabled ", + style: { + display: "block", + }, + }, + [ + $el("input", { + type: "checkbox", + checked: !!TextAreaAutoComplete.lorasEnabled, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.lorasEnabled = checked; + toggleLoras(); + localStorage.setItem(id + ".ShowLoras", TextAreaAutoComplete.lorasEnabled); + }, + }), + ] + ), + $el( + "label", + { + textContent: "Auto-insert comma ", + style: { + display: "block", + }, + }, + [ + $el("input", { + type: "checkbox", + checked: !!TextAreaAutoComplete.globalSeparator, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.globalSeparator = checked ? ", " : ""; + localStorage.setItem(id + ".AutoSeparate", TextAreaAutoComplete.globalSeparator); + }, + }), + ] + ), + $el( + "label", + { + textContent: "Replace _ with space ", + style: { + display: "block", + }, + }, + [ + $el("input", { + type: "checkbox", + checked: !!TextAreaAutoComplete.replacer, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.replacer = checked ? 
+										(v) => v.replaceAll("_", " ") : undefined;
+									localStorage.setItem(id + ".ReplaceUnderscore", checked);
+								},
+							}),
+						]
+					),
+					$el(
+						"label",
+						{
+							textContent: "Insert suggestion on: ",
+							style: {
+								display: "block",
+							},
+						},
+						[
+							$el(
+								"label",
+								{
+									textContent: "Tab",
+									style: {
+										display: "block",
+										marginLeft: "20px",
+									},
+								},
+								[
+									$el("input", {
+										type: "checkbox",
+										checked: !!TextAreaAutoComplete.insertOnTab,
+										onchange: (event) => {
+											const checked = !!event.target.checked;
+											TextAreaAutoComplete.insertOnTab = checked;
+											localStorage.setItem(id + ".InsertOnTab", checked);
+										},
+									}),
+								]
+							),
+							$el(
+								"label",
+								{
+									textContent: "Enter",
+									style: {
+										display: "block",
+										marginLeft: "20px",
+									},
+								},
+								[
+									$el("input", {
+										type: "checkbox",
+										checked: !!TextAreaAutoComplete.insertOnEnter,
+										onchange: (event) => {
+											const checked = !!event.target.checked;
+											TextAreaAutoComplete.insertOnEnter = checked;
+											localStorage.setItem(id + ".InsertOnEnter", checked);
+										},
+									}),
+								]
+							),
+						]
+					),
+					$el(
+						"label",
+						{
+							textContent: "Max suggestions: ",
+							style: {
+								display: "block",
+							},
+						},
+						[
+							$el("input", {
+								type: "number",
+								value: +TextAreaAutoComplete.suggestionCount,
+								style: {
+									width: "80px",
+								},
+								onchange: (event) => {
+									const value = +event.target.value;
+									TextAreaAutoComplete.suggestionCount = value;
+									localStorage.setItem(id + ".SuggestionCount", TextAreaAutoComplete.suggestionCount);
+								},
+							}),
+						]
+					),
+					$el("button", {
+						textContent: "Manage Custom Words",
+						onclick: () => {
+							app.ui.settings.element.close();
+							new CustomWordsDialog().show();
+						},
+						style: {
+							fontSize: "14px",
+							display: "block",
+							marginTop: "5px",
+						},
+					}),
+				]),
+			]);
+		},
+	});
+
+	TextAreaAutoComplete.enabled = enabledSetting.value;
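+	/*
+	 The toggles above persist in localStorage under keys derived from the
+	 extension id; a quick sketch (values illustrative):
+
+	   localStorage.getItem("pysssss.AutoCompleter.AutoSeparate");     // e.g. ", "
+	   localStorage.getItem("pysssss.AutoCompleter.InsertOnTab");      // e.g. "true"
+	   localStorage.getItem("pysssss.AutoCompleter.SuggestionCount");  // e.g. "20"
+	*/
+	TextAreaAutoComplete.replacer = localStorage.getItem(id + ".ReplaceUnderscore") === "true" ?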
+		(v) => v.replaceAll("_", " ") : undefined;
+	TextAreaAutoComplete.insertOnTab = localStorage.getItem(id + ".InsertOnTab") !== "false";
+	TextAreaAutoComplete.insertOnEnter = localStorage.getItem(id + ".InsertOnEnter") !== "false";
+	TextAreaAutoComplete.lorasEnabled = localStorage.getItem(id + ".ShowLoras") === "true";
+	TextAreaAutoComplete.suggestionCount = +localStorage.getItem(id + ".SuggestionCount") || 20;
+	},
+	setup() {
+		async function addEmbeddings() {
+			const embeddings = await api.getEmbeddings();
+			const words = {};
+			words["embedding:"] = { text: "embedding:" };
+
+			for (const emb of embeddings) {
+				const v = `embedding:${emb}`;
+				words[v] = {
+					text: v,
+					info: () => new EmbeddingInfoDialog(emb).show("embeddings", emb),
+					use_replacer: false,
+				};
+			}
+
+			TextAreaAutoComplete.updateWords("pysssss.embeddings", words);
+		}
+
+		async function addLoras() {
+			let loras;
+			try {
+				loras = LiteGraph.registered_node_types["LoraLoader"]?.nodeData.input.required.lora_name[0];
+			} catch (error) {}
+
+			if (!loras?.length) {
+				loras = await api.fetchApi("/pysssss/loras", { cache: "no-store" }).then((res) => res.json());
+			}
+
+			const words = {};
+			words["lora:"] = { text: "lora:" };
+
+			for (const lora of loras) {
+				const v = `<lora:${lora}:1.0>`;
+				words[v] = {
+					text: v,
+					info: () => new LoraInfoDialog(lora).show("loras", lora),
+					use_replacer: false,
+				};
+			}
+
+			TextAreaAutoComplete.updateWords("pysssss.loras", words);
+		}
+
+		// store global words with/without loras
+		Promise.all([addEmbeddings(), addCustomWords()])
+			.then(() => {
+				TextAreaAutoComplete.globalWordsExclLoras = Object.assign({}, TextAreaAutoComplete.globalWords);
+			})
+			.then(addLoras)
+			.then(() => {
+				if (!TextAreaAutoComplete.lorasEnabled) {
+					toggleLoras(); // off by default
+				}
+			});
+	},
+	beforeRegisterNodeDef(_, def) {
+		// Process each input to see if there is a custom word list for
+		// { input: { required: { something: ["STRING", { "pysssss.autocomplete": ["groupid", ["custom", "words"] ] }] } } }
+		const inputs = { ...def.input?.required, ...def.input?.optional };
+		for (const input in inputs) {
+			const config = inputs[input][1]?.["pysssss.autocomplete"];
+			if (!config) continue;
+			if (typeof config === "object" && config.words) {
+				const words = {};
+				for (const text of config.words || []) {
+					const obj = typeof text === "string" ? { text } : text;
+					words[obj.text] = obj;
+				}
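+				/*
+				 updateWords merges a named group into the shared word pool; a
+				 minimal sketch using the signature defined in common/autocomplete.js
+				 (group id and words invented):
+
+				   TextAreaAutoComplete.updateWords("MyNode.text", {
+				     portrait: { text: "portrait", priority: 5 },
+				   }, false); // addGlobal=false keeps the group per-widget only
+				*/
+				TextAreaAutoComplete.updateWords(def.name + "."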
+ input, words, false); + } + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js new file mode 100644 index 0000000000000000000000000000000000000000..0fb6d2f749641d9a775b05425a210df9114945b0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js @@ -0,0 +1,370 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; +import { $el } from "../../../scripts/ui.js"; +import { api } from "../../../scripts/api.js"; + +const CHECKPOINT_LOADER = "CheckpointLoader|pysssss"; +const LORA_LOADER = "LoraLoader|pysssss"; + +function getType(node) { + if (node.comfyClass === CHECKPOINT_LOADER) { + return "checkpoints"; + } + return "loras"; +} + +app.registerExtension({ + name: "pysssss.Combo++", + init() { + $el("style", { + textContent: ` + .litemenu-entry:hover .pysssss-combo-image { + display: block; + } + .pysssss-combo-image { + display: none; + position: absolute; + left: 0; + top: 0; + transform: translate(-100%, 0); + width: 384px; + height: 384px; + background-size: contain; + background-position: top right; + background-repeat: no-repeat; + filter: brightness(65%); + } + `, + parent: document.body, + }); + + const submenuSetting = app.ui.settings.addSetting({ + id: "pysssss.Combo++.Submenu", + name: "🐍 Enable submenu in custom nodes", + defaultValue: true, + type: "boolean", + }); + + // Ensure hook callbacks are available + const getOrSet = (target, name, create) => { + if (name in target) return target[name]; + return (target[name] = create()); + }; + const symbol = getOrSet(window, "__pysssss__", () => Symbol("__pysssss__")); + const store = getOrSet(window, symbol, () => ({})); + const contextMenuHook = getOrSet(store, "contextMenuHook", () => ({})); + for (const e of ["ctor", "preAddItem", "addItem"]) { + if (!contextMenuHook[e]) { + contextMenuHook[e] = []; + } + } + // // Checks if this is a custom combo item + const isCustomItem = (value) => value && typeof value === "object" && "image" in value && value.content; + // Simple check for what separator to split by + const splitBy = (navigator.platform || navigator.userAgent).includes("Win") ? 
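+		/*
+		 e.g. a model stored in a sub-folder (path invented):
+
+		   "styles\\portrait.safetensors".split(/\/|\\/)  // Windows -> ["styles", "portrait.safetensors"]
+		   "styles/portrait.safetensors".split(/\//)      // elsewhere -> ["styles", "portrait.safetensors"]
+		*/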
/\/|\\/ : /\//; + + contextMenuHook["ctor"].push(function (values, options) { + // Copy the class from the parent so if we are dark we are also dark + // this enables the filter box + if (options.parentMenu?.options?.className === "dark") { + options.className = "dark"; + } + }); + + function encodeRFC3986URIComponent(str) { + return encodeURIComponent(str).replace( + /[!'()*]/g, + (c) => `%${c.charCodeAt(0).toString(16).toUpperCase()}`, + ); + } + + // After an element is created for an item, add an image if it has one + contextMenuHook["addItem"].push(function (el, menu, [name, value, options]) { + if (el && isCustomItem(value) && value?.image && !value.submenu) { + el.textContent += " *"; + $el("div.pysssss-combo-image", { + parent: el, + style: { + backgroundImage: `url(/pysssss/view/${encodeRFC3986URIComponent(value.image)})`, + }, + }); + } + }); + + function buildMenu(widget, values) { + const lookup = { + "": { options: [] }, + }; + + // Split paths into menu structure + for (const value of values) { + const split = value.content.split(splitBy); + let path = ""; + for (let i = 0; i < split.length; i++) { + const s = split[i]; + const last = i === split.length - 1; + if (last) { + // Leaf node, manually add handler that sets the lora + lookup[path].options.push({ + ...value, + title: s, + callback: () => { + widget.value = value; + widget.callback(value); + app.graph.setDirtyCanvas(true); + }, + }); + } else { + const prevPath = path; + path += s + splitBy; + if (!lookup[path]) { + const sub = { + title: s, + submenu: { + options: [], + title: s, + }, + }; + + // Add to tree + lookup[path] = sub.submenu; + lookup[prevPath].options.push(sub); + } + } + } + } + + return lookup[""].options; + } + + // Override COMBO widgets to patch their values + const combo = ComfyWidgets["COMBO"]; + ComfyWidgets["COMBO"] = function (node, inputName, inputData) { + const type = inputData[0]; + const res = combo.apply(this, arguments); + if (isCustomItem(type[0])) { + let value = res.widget.value; + let values = res.widget.options.values; + let menu = null; + + // Override the option values to check if we should render a menu structure + Object.defineProperty(res.widget.options, "values", { + get() { + let v = values; + + if (submenuSetting.value) { + if (!menu) { + // Only build the menu once + menu = buildMenu(res.widget, values); + } + v = menu; + } + + const valuesIncludes = v.includes; + v.includes = function (searchElement) { + const includesFromMenuItems = function (items) { + for (const item of items) { + if (includesFromMenuItem(item)) { + return true; + } + } + return false; + } + const includesFromMenuItem = function (item) { + if (item.submenu) { + return includesFromMenuItems(item.submenu.options) + } else { + return item.content === searchElement.content; + } + } + + const includes = valuesIncludes.apply(this, arguments) || includesFromMenuItems(this); + return includes; + } + + return v; + }, + set(v) { + // Options are changing (refresh) so reset the menu so it can be rebuilt if required + values = v; + menu = null; + }, + }); + + Object.defineProperty(res.widget, "value", { + get() { + // HACK: litegraph supports rendering items with "content" in the menu, but not on the widget + // This detects when its being called by the widget drawing and just returns the text + // Also uses the content for the same image replacement value + if (res.widget) { + const stack = new Error().stack; + if (stack.includes("drawNodeWidgets") || stack.includes("saveImageExtraOutput")) { + return (value || 
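+					/*
+					 buildMenu turns the flat combo list into nested submenus, e.g.
+					 (file names invented):
+
+					   ["sd15/a.safetensors", "sd15/b.safetensors", "c.safetensors"]
+					   // -> [{ title: "sd15", submenu: { options: [a, b] } },
+					   //     { ...c, title: "c.safetensors", callback: ... }]
+					*/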
type[0]).content; + } + } + return value; + }, + set(v) { + if (v?.submenu) { + // Dont allow selection of submenus + return; + } + value = v; + }, + }); + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + const isCkpt = nodeType.comfyClass === CHECKPOINT_LOADER; + const isLora = nodeType.comfyClass === LORA_LOADER; + if (isCkpt || isLora) { + const onAdded = nodeType.prototype.onAdded; + nodeType.prototype.onAdded = function () { + onAdded?.apply(this, arguments); + const { widget: exampleList } = ComfyWidgets["COMBO"](this, "example", [[""]], app); + + let exampleWidget; + + const get = async (route, suffix) => { + const url = encodeURIComponent(`${getType(nodeType)}${suffix || ""}`); + return await api.fetchApi(`/pysssss/${route}/${url}`); + }; + + const getExample = async () => { + if (exampleList.value === "[none]") { + if (exampleWidget) { + exampleWidget.inputEl.remove(); + exampleWidget = null; + this.widgets.length -= 1; + } + return; + } + + const v = this.widgets[0].value.content; + const pos = v.lastIndexOf("."); + const name = v.substr(0, pos); + let exampleName = exampleList.value; + let viewPath = `/${name}`; + if (exampleName === "notes") { + viewPath += ".txt"; + } else { + viewPath += `/${exampleName}`; + } + const example = await (await get("view", viewPath)).text(); + if (!exampleWidget) { + exampleWidget = ComfyWidgets["STRING"](this, "prompt", ["STRING", { multiline: true }], app).widget; + exampleWidget.inputEl.readOnly = true; + exampleWidget.inputEl.style.opacity = 0.6; + } + exampleWidget.value = example; + }; + + const exampleCb = exampleList.callback; + exampleList.callback = function () { + getExample(); + return exampleCb?.apply(this, arguments) ?? exampleList.value; + }; + + + const listExamples = async () => { + exampleList.disabled = true; + exampleList.options.values = ["[none]"]; + exampleList.value = "[none]"; + let examples = []; + if (this.widgets[0].value?.content) { + try { + examples = await (await get("examples", `/${this.widgets[0].value.content}`)).json(); + } catch (error) {} + } + exampleList.options.values = ["[none]", ...examples]; + exampleList.value = exampleList.options.values[+!!examples.length]; + exampleList.callback(); + exampleList.disabled = !examples.length; + app.graph.setDirtyCanvas(true, true); + }; + + // Expose function to update examples + nodeType.prototype["pysssss.updateExamples"] = listExamples; + + const modelWidget = this.widgets[0]; + const modelCb = modelWidget.callback; + let prev = undefined; + modelWidget.callback = function () { + const ret = modelCb?.apply(this, arguments) ?? modelWidget.value; + let v = ret; + if (ret?.content) { + v = ret.content; + } + if (prev !== v) { + listExamples(); + prev = v; + } + return ret; + }; + setTimeout(() => { + modelWidget.callback(); + }, 30); + }; + + // Prevent adding HIDDEN inputs + const addInput = nodeType.prototype.addInput ?? 
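+			/*
+			 The example browser above round-trips two endpoints; an assumed
+			 trace for a lora named "mylora" (name and files invented):
+
+			   GET /pysssss/examples/loras%2Fmylora.safetensors -> ["notes", "ex1.txt"]
+			   GET /pysssss/view/loras%2Fmylora%2Fex1.txt       -> prompt text shown read-only
+			*/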
LGraphNode.prototype.addInput; + nodeType.prototype.addInput = function (_, type) { + if (type === "HIDDEN") return; + return addInput.apply(this, arguments); + }; + } + + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + if (this.imgs) { + // If this node has images then we add an open in new tab item + let img; + if (this.imageIndex != null) { + // An image is selected so select that + img = this.imgs[this.imageIndex]; + } else if (this.overIndex != null) { + // No image is selected but one is hovered + img = this.imgs[this.overIndex]; + } + if (img) { + const nodes = app.graph._nodes.filter( + (n) => n.comfyClass === LORA_LOADER || n.comfyClass === CHECKPOINT_LOADER + ); + if (nodes.length) { + options.unshift({ + content: "Save as Preview", + submenu: { + options: nodes.map((n) => ({ + content: n.widgets[0].value.content, + callback: async () => { + const url = new URL(img.src); + const { image } = await api.fetchApi( + "/pysssss/save/" + encodeURIComponent(`${getType(n)}/${n.widgets[0].value.content}`), + { + method: "POST", + body: JSON.stringify({ + filename: url.searchParams.get("filename"), + subfolder: url.searchParams.get("subfolder"), + type: url.searchParams.get("type"), + }), + headers: { + "content-type": "application/json", + }, + } + ); + n.widgets[0].value.image = image; + app.refreshComboInNodes(); + }, + })), + }, + }); + } + } + } + return getExtraMenuOptions?.apply(this, arguments); + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css new file mode 100644 index 0000000000000000000000000000000000000000..a17d87a025488d21126355df7608562745808ac6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css @@ -0,0 +1,62 @@ +.pysssss-autocomplete { + color: var(--descrip-text); + background-color: var(--comfy-menu-bg); + position: absolute; + font-family: sans-serif; + box-shadow: 3px 3px 8px rgba(0, 0, 0, 0.4); + z-index: 9999; + overflow: auto; +} + +.pysssss-autocomplete-item { + cursor: pointer; + padding: 3px 7px; + display: flex; + border-left: 3px solid transparent; + align-items: center; +} + +.pysssss-autocomplete-item--selected { + border-left-color: dodgerblue; +} + +.pysssss-autocomplete-highlight { + font-weight: bold; + text-decoration: underline; + text-decoration-color: dodgerblue; +} + +.pysssss-autocomplete-pill { + margin-left: auto; + font-size: 10px; + color: #fff; + padding: 2px 4px 2px 14px; + position: relative; +} + +.pysssss-autocomplete-pill::after { + content: ""; + display: block; + background: rgba(255, 255, 255, 0.25); + width: calc(100% - 10px); + height: 100%; + position: absolute; + left: 10px; + top: 0; + border-radius: 5px; +} + +.pysssss-autocomplete-pill + .pysssss-autocomplete-pill { + margin-left: 0; +} + +.pysssss-autocomplete-item-info { + margin-left: auto; + transition: filter 0.2s; + will-change: filter; + text-decoration: none; + padding-left: 10px; +} +.pysssss-autocomplete-item-info:hover { + filter: invert(1); +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js new file mode 100644 index 0000000000000000000000000000000000000000..9ffe09f767b82a9a1bf71196fd5a3f9a433de395 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js @@ -0,0 
+1,681 @@ +import { $el } from "../../../../scripts/ui.js"; +import { addStylesheet } from "./utils.js"; + +addStylesheet(import.meta.url); + +/* + https://github.com/component/textarea-caret-position + The MIT License (MIT) + + Copyright (c) 2015 Jonathan Ong me@jongleberry.com + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ +const getCaretCoordinates = (function () { + // We'll copy the properties below into the mirror div. + // Note that some browsers, such as Firefox, do not concatenate properties + // into their shorthand (e.g. padding-top, padding-bottom etc. -> padding), + // so we have to list every single property explicitly. + var properties = [ + "direction", // RTL support + "boxSizing", + "width", // on Chrome and IE, exclude the scrollbar, so the mirror div wraps exactly as the textarea does + "height", + "overflowX", + "overflowY", // copy the scrollbar for IE + + "borderTopWidth", + "borderRightWidth", + "borderBottomWidth", + "borderLeftWidth", + "borderStyle", + + "paddingTop", + "paddingRight", + "paddingBottom", + "paddingLeft", + + // https://developer.mozilla.org/en-US/docs/Web/CSS/font + "fontStyle", + "fontVariant", + "fontWeight", + "fontStretch", + "fontSize", + "fontSizeAdjust", + "lineHeight", + "fontFamily", + + "textAlign", + "textTransform", + "textIndent", + "textDecoration", // might not make a difference, but better be safe + + "letterSpacing", + "wordSpacing", + + "tabSize", + "MozTabSize", + ]; + + var isBrowser = typeof window !== "undefined"; + var isFirefox = isBrowser && window.mozInnerScreenX != null; + + return function getCaretCoordinates(element, position, options) { + if (!isBrowser) { + throw new Error("textarea-caret-position#getCaretCoordinates should only be called in a browser"); + } + + var debug = (options && options.debug) || false; + if (debug) { + var el = document.querySelector("#input-textarea-caret-position-mirror-div"); + if (el) el.parentNode.removeChild(el); + } + + // The mirror div will replicate the textarea's style + var div = document.createElement("div"); + div.id = "input-textarea-caret-position-mirror-div"; + document.body.appendChild(div); + + var style = div.style; + var computed = window.getComputedStyle ? 
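+	/*
+	 Usage sketch for the mirror-div technique below:
+
+	   const ta = document.querySelector("textarea"); // any textarea
+	   const caret = getCaretCoordinates(ta, ta.selectionEnd);
+	   // caret = { top, left, height }, relative to the textarea's content box
+	*/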
window.getComputedStyle(element) : element.currentStyle; // currentStyle for IE < 9 + var isInput = element.nodeName === "INPUT"; + + // Default textarea styles + style.whiteSpace = "pre-wrap"; + if (!isInput) style.wordWrap = "break-word"; // only for textarea-s + + // Position off-screen + style.position = "absolute"; // required to return coordinates properly + if (!debug) style.visibility = "hidden"; // not 'display: none' because we want rendering + + // Transfer the element's properties to the div + properties.forEach(function (prop) { + if (isInput && prop === "lineHeight") { + // Special case for s because text is rendered centered and line height may be != height + if (computed.boxSizing === "border-box") { + var height = parseInt(computed.height); + var outerHeight = + parseInt(computed.paddingTop) + + parseInt(computed.paddingBottom) + + parseInt(computed.borderTopWidth) + + parseInt(computed.borderBottomWidth); + var targetHeight = outerHeight + parseInt(computed.lineHeight); + if (height > targetHeight) { + style.lineHeight = height - outerHeight + "px"; + } else if (height === targetHeight) { + style.lineHeight = computed.lineHeight; + } else { + style.lineHeight = 0; + } + } else { + style.lineHeight = computed.height; + } + } else { + style[prop] = computed[prop]; + } + }); + + if (isFirefox) { + // Firefox lies about the overflow property for textareas: https://bugzilla.mozilla.org/show_bug.cgi?id=984275 + if (element.scrollHeight > parseInt(computed.height)) style.overflowY = "scroll"; + } else { + style.overflow = "hidden"; // for Chrome to not render a scrollbar; IE keeps overflowY = 'scroll' + } + + div.textContent = element.value.substring(0, position); + // The second special handling for input type="text" vs textarea: + // spaces need to be replaced with non-breaking spaces - http://stackoverflow.com/a/13402035/1269037 + if (isInput) div.textContent = div.textContent.replace(/\s/g, "\u00a0"); + + var span = document.createElement("span"); + // Wrapping must be replicated *exactly*, including when a long word gets + // onto the next line, with whitespace at the end of the line before (#7). + // The *only* reliable way to do that is to copy the *entire* rest of the + // textarea's content into the created at the caret position. + // For inputs, just '.' would be enough, but no need to bother. + span.textContent = element.value.substring(position) || "."; // || because a completely empty faux span doesn't render at all + div.appendChild(span); + + var coordinates = { + top: span.offsetTop + parseInt(computed["borderTopWidth"]), + left: span.offsetLeft + parseInt(computed["borderLeftWidth"]), + height: parseInt(computed["lineHeight"]), + }; + + if (debug) { + span.style.backgroundColor = "#aaa"; + } else { + document.body.removeChild(div); + } + + return coordinates; + }; +})(); + +/* + Key functions from: + https://github.com/yuku/textcomplete + © Yuku Takahashi - This software is licensed under the MIT license. 
+ + The MIT License (MIT) + + Copyright (c) 2015 Jonathan Ong me@jongleberry.com + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ +const CHAR_CODE_ZERO = "0".charCodeAt(0); +const CHAR_CODE_NINE = "9".charCodeAt(0); + +class TextAreaCaretHelper { + constructor(el, getScale) { + this.el = el; + this.getScale = getScale; + } + + #calculateElementOffset() { + const rect = this.el.getBoundingClientRect(); + const owner = this.el.ownerDocument; + if (owner == null) { + throw new Error("Given element does not belong to document"); + } + const { defaultView, documentElement } = owner; + if (defaultView == null) { + throw new Error("Given element does not belong to window"); + } + const offset = { + top: rect.top + defaultView.pageYOffset, + left: rect.left + defaultView.pageXOffset, + }; + if (documentElement) { + offset.top -= documentElement.clientTop; + offset.left -= documentElement.clientLeft; + } + return offset; + } + + #isDigit(charCode) { + return CHAR_CODE_ZERO <= charCode && charCode <= CHAR_CODE_NINE; + } + + #getLineHeightPx() { + const computedStyle = getComputedStyle(this.el); + const lineHeight = computedStyle.lineHeight; + // If the char code starts with a digit, it is either a value in pixels, + // or unitless, as per: + // https://drafts.csswg.org/css2/visudet.html#propdef-line-height + // https://drafts.csswg.org/css2/cascade.html#computed-value + if (this.#isDigit(lineHeight.charCodeAt(0))) { + const floatLineHeight = parseFloat(lineHeight); + // In real browsers the value is *always* in pixels, even for unit-less + // line-heights. However, we still check as per the spec. + return this.#isDigit(lineHeight.charCodeAt(lineHeight.length - 1)) + ? floatLineHeight * parseFloat(computedStyle.fontSize) + : floatLineHeight; + } + // Otherwise, the value is "normal". + // If the line-height is "normal", calculate by font-size + return this.#calculateLineHeightPx(this.el.nodeName, computedStyle); + } + + /** + * Returns calculated line-height of the given node in pixels. 
+	 */
+	#calculateLineHeightPx(nodeName, computedStyle) {
+		const body = document.body;
+		if (!body) return 0;
+
+		const tempNode = document.createElement(nodeName);
+		tempNode.innerHTML = "&nbsp;";
+		Object.assign(tempNode.style, {
+			fontSize: computedStyle.fontSize,
+			fontFamily: computedStyle.fontFamily,
+			padding: "0",
+			position: "absolute",
+		});
+		body.appendChild(tempNode);
+
+		// Make sure textarea has only 1 row
+		if (tempNode instanceof HTMLTextAreaElement) {
+			tempNode.rows = 1;
+		}
+
+		// Assume the height of the element is the line-height
+		const height = tempNode.offsetHeight;
+		body.removeChild(tempNode);
+
+		return height;
+	}
+
+	getCursorOffset() {
+		const scale = this.getScale();
+		const elOffset = this.#calculateElementOffset();
+		const elScroll = this.#getElScroll();
+		const cursorPosition = this.#getCursorPosition();
+		const lineHeight = this.#getLineHeightPx();
+		const top = elOffset.top - (elScroll.top * scale) + (cursorPosition.top + lineHeight) * scale;
+		const left = elOffset.left - elScroll.left + cursorPosition.left;
+		const clientTop = this.el.getBoundingClientRect().top;
+		if (this.el.dir !== "rtl") {
+			return { top, left, lineHeight, clientTop };
+		} else {
+			const right = document.documentElement ? document.documentElement.clientWidth - left : 0;
+			return { top, right, lineHeight, clientTop };
+		}
+	}
+
+	#getElScroll() {
+		return { top: this.el.scrollTop, left: this.el.scrollLeft };
+	}
+
+	#getCursorPosition() {
+		return getCaretCoordinates(this.el, this.el.selectionEnd);
+	}
+
+	getBeforeCursor() {
+		return this.el.selectionStart !== this.el.selectionEnd ? null : this.el.value.substring(0, this.el.selectionEnd);
+	}
+
+	getAfterCursor() {
+		return this.el.value.substring(this.el.selectionEnd);
+	}
+
+	insertAtCursor(value, offset, finalOffset) {
+		if (this.el.selectionStart != null) {
+			const startPos = this.el.selectionStart;
+			const endPos = this.el.selectionEnd;
+
+			// Move selection to beginning of offset
+			this.el.selectionStart = this.el.selectionStart + offset;
+
+			// Using execCommand to support undo, but since it's officially
+			// 'deprecated' we need a backup solution, but it won't support undo :(
+			let pasted = true;
+			try {
+				if (!document.execCommand("insertText", false, value)) {
+					pasted = false;
+				}
+			} catch (e) {
+				console.error("Error caught during execCommand:", e);
+				pasted = false;
+			}
+
+			if (!pasted) {
+				console.error("execCommand unsuccessful; not supported. Adding text manually, no undo support.");
+				// Fallback without undo: replace the current selection directly
+				this.el.setRangeText(value, this.el.selectionStart, this.el.selectionEnd, "end");
+			}
+
+			this.el.selectionEnd = this.el.selectionStart = startPos + value.length + offset + (finalOffset ?? 0);
+		} else {
+			// Using execCommand to support undo, but since it's officially
+			// 'deprecated' we need a backup solution, but it won't support undo :(
+			let pasted = true;
+			try {
+				if (!document.execCommand("insertText", false, value)) {
+					pasted = false;
+				}
+			} catch (e) {
+				console.error("Error caught during execCommand:", e);
+				pasted = false;
+			}
+
+			if (!pasted) {
+				console.error(
+					"execCommand unsuccessful; not supported. 
Adding text manually, no undo support."); + this.el.value += value; + } + } + } +} + +/*********************/ + +/** + * @typedef {{ + * text: string, + * priority?: number, + * info?: Function, + * hint?: string, + * showValue?: boolean, + * caretOffset?: number + * }} AutoCompleteEntry + */ +export class TextAreaAutoComplete { + static globalSeparator = ""; + static enabled = true; + static insertOnTab = true; + static insertOnEnter = true; + static replacer = undefined; + static lorasEnabled = false; + static suggestionCount = 20; + + /** @type {Record>} */ + static groups = {}; + /** @type {Set} */ + static globalGroups = new Set(); + /** @type {Record} */ + static globalWords = {}; + /** @type {Record} */ + static globalWordsExclLoras = {}; + + /** @type {HTMLTextAreaElement} */ + el; + + /** @type {Record} */ + overrideWords; + overrideSeparator = ""; + + get words() { + return this.overrideWords ?? TextAreaAutoComplete.globalWords; + } + + get separator() { + return this.overrideSeparator ?? TextAreaAutoComplete.globalSeparator; + } + + /** + * @param {HTMLTextAreaElement} el + */ + constructor(el, words = null, separator = null) { + this.el = el; + this.helper = new TextAreaCaretHelper(el, () => app.canvas.ds.scale); + this.dropdown = $el("div.pysssss-autocomplete"); + this.overrideWords = words; + this.overrideSeparator = separator; + + this.#setup(); + } + + #setup() { + this.el.addEventListener("keydown", this.#keyDown.bind(this)); + this.el.addEventListener("keypress", this.#keyPress.bind(this)); + this.el.addEventListener("keyup", this.#keyUp.bind(this)); + this.el.addEventListener("click", this.#hide.bind(this)); + this.el.addEventListener("blur", () => setTimeout(() => this.#hide(), 150)); + } + + /** + * @param {KeyboardEvent} e + */ + #keyDown(e) { + if (!TextAreaAutoComplete.enabled) return; + + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "ArrowUp": + e.preventDefault(); + if (this.selected.index) { + this.#setSelected(this.currentWords[this.selected.index - 1].wordInfo); + } else { + this.#setSelected(this.currentWords[this.currentWords.length - 1].wordInfo); + } + break; + case "ArrowDown": + e.preventDefault(); + if (this.selected.index === this.currentWords.length - 1) { + this.#setSelected(this.currentWords[0].wordInfo); + } else { + this.#setSelected(this.currentWords[this.selected.index + 1].wordInfo); + } + break; + case "Tab": + if (TextAreaAutoComplete.insertOnTab) { + this.#insertItem(); + e.preventDefault(); + } + break; + } + } + } + + /** + * @param {KeyboardEvent} e + */ + #keyPress(e) { + if (!TextAreaAutoComplete.enabled) return; + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "Enter": + if (!e.ctrlKey) { + if (TextAreaAutoComplete.insertOnEnter) { + this.#insertItem(); + e.preventDefault(); + } + } + break; + } + } + + if (!e.defaultPrevented) { + this.#update(); + } + } + + #keyUp(e) { + if (!TextAreaAutoComplete.enabled) return; + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "Escape": + e.preventDefault(); + this.#hide(); + break; + } + } else if (e.key.length > 1 && e.key != "Delete" && e.key != "Backspace") { + return; + } + if (!e.defaultPrevented) { + this.#update(); + } + } + + #setSelected(item) { + if (this.selected) { + this.selected.el.classList.remove("pysssss-autocomplete-item--selected"); + } + + this.selected = item; + this.selected.el.classList.add("pysssss-autocomplete-item--selected"); + } + + #insertItem() { + if (!this.selected) 
return; + this.selected.el.click(); + } + + #getFilteredWords(term) { + term = term.toLocaleLowerCase(); + + const priorityMatches = []; + const prefixMatches = []; + const includesMatches = []; + for (const word of Object.keys(this.words)) { + const lowerWord = word.toLocaleLowerCase(); + if (lowerWord === term) { + // Dont include exact matches + continue; + } + + const pos = lowerWord.indexOf(term); + if (pos === -1) { + // No match + continue; + } + + const wordInfo = this.words[word]; + if (wordInfo.priority) { + priorityMatches.push({ pos, wordInfo }); + } else if (pos) { + includesMatches.push({ pos, wordInfo }); + } else { + prefixMatches.push({ pos, wordInfo }); + } + } + + priorityMatches.sort( + (a, b) => + b.wordInfo.priority - a.wordInfo.priority || + a.wordInfo.text.length - b.wordInfo.text.length || + a.wordInfo.text.localeCompare(b.wordInfo.text) + ); + + const top = priorityMatches.length * 0.2; + return priorityMatches.slice(0, top).concat(prefixMatches, priorityMatches.slice(top), includesMatches).slice(0, TextAreaAutoComplete.suggestionCount); + } + + #update() { + let before = this.helper.getBeforeCursor(); + if (before?.length) { + const m = before.match(/([^\s|,|;|"]+)$/); + if (m) { + before = m[0]; + } else { + before = null; + } + } + + if (!before) { + this.#hide(); + return; + } + + this.currentWords = this.#getFilteredWords(before); + if (!this.currentWords.length) { + this.#hide(); + return; + } + + this.dropdown.style.display = ""; + + let hasSelected = false; + const items = this.currentWords.map(({ wordInfo, pos }, i) => { + const parts = [ + $el("span", { + textContent: wordInfo.text.substr(0, pos), + }), + $el("span.pysssss-autocomplete-highlight", { + textContent: wordInfo.text.substr(pos, before.length), + }), + $el("span", { + textContent: wordInfo.text.substr(pos + before.length), + }), + ]; + + if (wordInfo.hint) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.hint, + }) + ); + } + + if (wordInfo.priority) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.priority, + }) + ); + } + + if (wordInfo.value && wordInfo.text !== wordInfo.value && wordInfo.showValue !== false) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.value, + }) + ); + } + + if (wordInfo.info) { + parts.push( + $el("a.pysssss-autocomplete-item-info", { + textContent: "ℹ️", + title: "View info...", + onclick: (e) => { + e.stopPropagation(); + wordInfo.info(); + e.preventDefault(); + }, + }) + ); + } + const item = $el( + "div.pysssss-autocomplete-item", + { + onclick: () => { + this.el.focus(); + let value = wordInfo.value ?? wordInfo.text; + const use_replacer = wordInfo.use_replacer ?? true; + if (TextAreaAutoComplete.replacer && use_replacer) { + value = TextAreaAutoComplete.replacer(value); + } + this.helper.insertAtCursor(value + this.separator, -before.length, wordInfo.caretOffset); + setTimeout(() => { + this.#update(); + }, 150); + }, + onmousemove: () => { + this.#setSelected(wordInfo); + }, + }, + parts + ); + + if (wordInfo === this.selected) { + hasSelected = true; + } + + wordInfo.index = i; + wordInfo.el = item; + + return item; + }); + + this.#setSelected(hasSelected ? this.selected : this.currentWords[0].wordInfo); + this.dropdown.replaceChildren(...items); + + if (!this.dropdown.parentElement) { + document.body.append(this.dropdown); + } + + const position = this.helper.getCursorOffset(); + this.dropdown.style.left = (position.left ?? 
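+		/*
+		 Bucket order produced by #getFilteredWords("cat") above
+		 (word list invented):
+
+		   "catgirl" (priority 90) -> priority bucket, top 20% listed first
+		   "cat ears"              -> prefix bucket (match at position 0)
+		   "tomcat"                -> substring bucket, listed last
+		   "cat"                   -> exact match, skipped entirely
+		*/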
0) + "px"; + this.dropdown.style.top = (position.top ?? 0) + "px"; + this.dropdown.style.maxHeight = (window.innerHeight - position.top) + "px"; + } + + #hide() { + this.selected = null; + this.dropdown.remove(); + } + + static updateWords(id, words, addGlobal = true) { + const isUpdate = id in TextAreaAutoComplete.groups; + TextAreaAutoComplete.groups[id] = words; + if (addGlobal) { + TextAreaAutoComplete.globalGroups.add(id); + } + + if (isUpdate) { + // Remerge all words + TextAreaAutoComplete.globalWords = Object.assign( + {}, + ...Object.keys(TextAreaAutoComplete.groups) + .filter((k) => TextAreaAutoComplete.globalGroups.has(k)) + .map((k) => TextAreaAutoComplete.groups[k]) + ); + } else if (addGlobal) { + // Just insert the new words + Object.assign(TextAreaAutoComplete.globalWords, words); + } + } +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js new file mode 100644 index 0000000000000000000000000000000000000000..13d66978c32ef2e5b7673ef9ec5aff2f45bc1305 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js @@ -0,0 +1,244 @@ +// @ts-check +// @ts-ignore +import { ComfyWidgets } from "../../../../scripts/widgets.js"; +// @ts-ignore +import { api } from "../../../../scripts/api.js"; +// @ts-ignore +import { app } from "../../../../scripts/app.js"; + +const PathHelper = { + get(obj, path) { + if (typeof path !== "string") { + // Hardcoded value + return path; + } + + if (path[0] === '"' && path[path.length - 1] === '"') { + // Hardcoded string + return JSON.parse(path); + } + + // Evaluate the path + path = path.split(".").filter(Boolean); + for (const p of path) { + const k = isNaN(+p) ? p : +p; + obj = obj[k]; + } + + return obj; + }, + set(obj, path, value) { + // https://stackoverflow.com/a/54733755 + if (Object(obj) !== obj) return obj; // When obj is not an object + // If not yet an array, get the keys from the string-path + if (!Array.isArray(path)) path = path.toString().match(/[^.[\]]+/g) || []; + path.slice(0, -1).reduce( + ( + a, + c, + i // Iterate all of them except the last one + ) => + Object(a[c]) === a[c] // Does the key exist and is its value an object? + ? // Yes: then follow that path + a[c] + : // No: create the key. Is the next key a potential array-index? + (a[c] = + Math.abs(path[i + 1]) >> 0 === +path[i + 1] + ? 
[] // Yes: assign a new array object + : {}), // No: assign a new plain object + obj + )[path[path.length - 1]] = value; // Finally assign the value to the last key + return obj; // Return the top-level object to allow chaining + }, +}; + +/*** + @typedef { { + left: string; + op: "eq" | "ne", + right: string + } } IfCondition + + @typedef { { + type: "if", + condition: Array, + true?: Array, + false?: Array + } } IfCallback + + @typedef { { + type: "fetch", + url: string, + then: Array + } } FetchCallback + + @typedef { { + type: "set", + target: string, + value: string + } } SetCallback + + @typedef { { + type: "validate-combo", + } } ValidateComboCallback + + @typedef { IfCallback | FetchCallback | SetCallback | ValidateComboCallback } BindingCallback + + @typedef { { + source: string, + callback: Array + } } Binding +***/ + +/** + * @param {IfCondition} condition + */ +function evaluateCondition(condition, state) { + const left = PathHelper.get(state, condition.left); + const right = PathHelper.get(state, condition.right); + + let r; + if (condition.op === "eq") { + r = left === right; + } else { + r = left !== right; + } + + return r; +} + +/** + * @type { Record) => Promise> } + */ +const callbacks = { + /** + * @param {IfCallback} cb + */ + async if(cb, state) { + // For now only support ANDs + let success = true; + for (const condition of cb.condition) { + const r = evaluateCondition(condition, state); + if (!r) { + success = false; + break; + } + } + + for (const m of cb[success + ""] ?? []) { + await invokeCallback(m, state); + } + }, + /** + * @param {FetchCallback} cb + */ + async fetch(cb, state) { + const url = cb.url.replace(/\{([^\}]+)\}/g, (m, v) => { + return PathHelper.get(state, v); + }); + const res = await (await api.fetchApi(url)).json(); + state["$result"] = res; + for (const m of cb.then) { + await invokeCallback(m, state); + } + }, + /** + * @param {SetCallback} cb + */ + async set(cb, state) { + const value = PathHelper.get(state, cb.value); + PathHelper.set(state, cb.target, value); + }, + async "validate-combo"(cb, state) { + const w = state["$this"]; + const valid = w.options.values.includes(w.value); + if (!valid) { + w.value = w.options.values[0]; + } + }, +}; + +async function invokeCallback(callback, state) { + if (callback.type in callbacks) { + // @ts-ignore + await callbacks[callback.type](callback, state); + } else { + console.warn( + "%c[🐍 pysssss]", + "color: limegreen", + `[binding ${state.$node.comfyClass}.${state.$this.name}]`, + "unsupported binding callback type:", + callback.type + ); + } +} + +app.registerExtension({ + name: "pysssss.Binding", + beforeRegisterNodeDef(node, nodeData) { + const hasBinding = (v) => { + if (!v) return false; + return Object.values(v).find((c) => c[1]?.["pysssss.binding"]); + }; + const inputs = { ...nodeData.input?.required, ...nodeData.input?.optional }; + if (hasBinding(inputs)) { + const onAdded = node.prototype.onAdded; + node.prototype.onAdded = function () { + const r = onAdded?.apply(this, arguments); + + for (const widget of this.widgets || []) { + const bindings = inputs[widget.name][1]?.["pysssss.binding"]; + if (!bindings) continue; + + for (const binding of bindings) { + /** + * @type {import("../../../../../web/types/litegraph.d.ts").IWidget} + */ + const source = this.widgets.find((w) => w.name === binding.source); + if (!source) { + console.warn( + "%c[🐍 pysssss]", + "color: limegreen", + `[binding ${node.comfyClass}.${widget.name}]`, + "unable to find source binding widget:", + binding.source, + 
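+						/*
+						 Shape of one binding as it would appear in a node definition,
+						 composed from the callback types above (widget name and URL
+						 invented):
+
+						   "pysssss.binding": [{
+						     "source": "ckpt_name",
+						     "callback": [{
+						       "type": "fetch",
+						       "url": "/example/info?model={$source.value}",
+						       "then": [
+						         { "type": "set", "target": "$this.options.values", "value": "$result" },
+						         { "type": "validate-combo" }
+						       ]
+						     }]
+						   }]
+						*/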
binding + ); + continue; + } + + let lastValue; + async function valueChanged() { + const state = { + $this: widget, + $source: source, + $node: node, + }; + + for (const callback of binding.callback) { + await invokeCallback(callback, state); + } + + app.graph.setDirtyCanvas(true, false); + } + + const cb = source.callback; + source.callback = function () { + const v = cb?.apply(this, arguments) ?? source.value; + if (v !== lastValue) { + lastValue = v; + valueChanged(); + } + return v; + }; + + lastValue = source.value; + valueChanged(); + } + } + + return r; + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css new file mode 100644 index 0000000000000000000000000000000000000000..789b7751f5d7fb8dd3426347338906fd57f59b09 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css @@ -0,0 +1,102 @@ +.pysssss-lightbox { + width: 100vw; + height: 100vh; + position: fixed; + top: 0; + left: 0; + z-index: 1000; + background: rgba(0, 0, 0, 0.6); + display: flex; + align-items: center; + transition: opacity 0.2s; +} + +.pysssss-lightbox-prev, +.pysssss-lightbox-next { + height: 60px; + display: flex; + align-items: center; +} + +.pysssss-lightbox-prev:after, +.pysssss-lightbox-next:after { + border-style: solid; + border-width: 0.25em 0.25em 0 0; + display: inline-block; + height: 0.45em; + left: 0.15em; + position: relative; + top: 0.15em; + transform: rotate(-135deg) scale(0.75); + vertical-align: top; + width: 0.45em; + padding: 10px; + font-size: 20px; + margin: 0 10px 0 20px; + transition: color 0.2s; + flex-shrink: 0; + content: ""; +} + +.pysssss-lightbox-next:after { + transform: rotate(45deg) scale(0.75); + margin: 0 20px 0 0px; +} + +.pysssss-lightbox-main { + display: grid; + flex: auto; + place-content: center; + text-align: center; +} + +.pysssss-lightbox-link { + display: flex; + justify-content: center; + align-items: center; + position: relative; +} + +.pysssss-lightbox .lds-ring { + position: absolute; + left: 50%; + top: 50%; + transform: translate(-50%, -50%); +} + +.pysssss-lightbox-img { + max-height: 90vh; + max-width: calc(100vw - 130px); + height: auto; + object-fit: contain; + border: 3px solid white; + border-radius: 4px; + transition: opacity 0.2s; + user-select: none; +} + +.pysssss-lightbox-img:hover { + border-color: dodgerblue; +} + +.pysssss-lightbox-close { + font-size: 80px; + line-height: 1ch; + height: 1ch; + width: 1ch; + position: absolute; + right: 10px; + top: 10px; + padding: 5px; +} + +.pysssss-lightbox-close:after { + content: "\00d7"; +} + +.pysssss-lightbox-close:hover, +.pysssss-lightbox-prev:hover, +.pysssss-lightbox-next:hover { + color: dodgerblue; + cursor: pointer; +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js new file mode 100644 index 0000000000000000000000000000000000000000..c49829e5da6719a298929fe77e4de75e399047bb --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js @@ -0,0 +1,149 @@ +import { $el } from "../../../../scripts/ui.js"; +import { addStylesheet, getUrl, loadImage } from "./utils.js"; +import { createSpinner } from "./spinner.js"; + +addStylesheet(getUrl("lightbox.css", import.meta.url)); + +const $$el = (tag, name, ...args) => { + if (name) name = "-" + name; + return $el(tag + ".pysssss-lightbox" + name, ...args); +}; + +const ani = 
async (a, t, b) => { + a(); + await new Promise((r) => setTimeout(r, t)); + b(); +}; + +export class Lightbox { + constructor() { + this.el = $$el("div", "", { + parent: document.body, + onclick: (e) => { + e.stopImmediatePropagation(); + this.close(); + }, + style: { + display: "none", + opacity: 0, + }, + }); + this.closeBtn = $$el("div", "close", { + parent: this.el, + }); + this.prev = $$el("div", "prev", { + parent: this.el, + onclick: (e) => { + this.update(-1); + e.stopImmediatePropagation(); + }, + }); + this.main = $$el("div", "main", { + parent: this.el, + }); + this.next = $$el("div", "next", { + parent: this.el, + onclick: (e) => { + this.update(1); + e.stopImmediatePropagation(); + }, + }); + this.link = $$el("a", "link", { + parent: this.main, + target: "_blank", + }); + this.spinner = createSpinner(); + this.link.appendChild(this.spinner); + this.img = $$el("img", "img", { + style: { + opacity: 0, + }, + parent: this.link, + onclick: (e) => { + e.stopImmediatePropagation(); + }, + onwheel: (e) => { + if (!(e instanceof WheelEvent) || e.ctrlKey) { + return; + } + const direction = Math.sign(e.deltaY); + this.update(direction); + }, + }); + } + + close() { + ani( + () => (this.el.style.opacity = 0), + 200, + () => (this.el.style.display = "none") + ); + } + + async show(images, index) { + this.images = images; + this.index = index || 0; + await this.update(0); + } + + async update(shift) { + if (shift < 0 && this.index <= 0) { + return; + } + if (shift > 0 && this.index >= this.images.length - 1) { + return; + } + this.index += shift; + + this.prev.style.visibility = this.index ? "unset" : "hidden"; + this.next.style.visibility = this.index === this.images.length - 1 ? "hidden" : "unset"; + + const img = this.images[this.index]; + this.el.style.display = "flex"; + this.el.clientWidth; // Force a reflow + this.el.style.opacity = 1; + this.img.style.opacity = 0; + this.spinner.style.display = "inline-block"; + try { + await loadImage(img); + } catch (err) { + console.error('failed to load image', img, err); + } + this.spinner.style.display = "none"; + this.link.href = img; + this.img.src = img; + this.img.style.opacity = 1; + } + + async updateWithNewImage(img, feedDirection) { + // No-op if lightbox is not open + if (this.el.style.display === "none" || this.el.style.opacity === "0") return; + + // Ensure currently shown image does not change + const [method, shift] = feedDirection === "newest first" ? 
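+		/*
+		 Usage sketch (image URLs invented):
+
+		   lightbox.show(["/view?filename=a.png", "/view?filename=b.png"], 0);
+		   // mouse wheel, ArrowLeft/ArrowRight (or a/d) navigate; Escape closes
+		*/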
["unshift", 1] : ["push", 0]; + this.images[method](img); + await this.update(shift); + } +} + +export const lightbox = new Lightbox(); + +addEventListener('keydown', (event) => { + if (lightbox.el.style.display === 'none') { + return; + } + const { key } = event; + switch (key) { + case 'ArrowLeft': + case 'a': + lightbox.update(-1); + break; + case 'ArrowRight': + case 'd': + lightbox.update(1); + break; + case 'Escape': + lightbox.close(); + break; + } +}); \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css new file mode 100644 index 0000000000000000000000000000000000000000..1769f837811368b02bd28e02003a23694cefebe4 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css @@ -0,0 +1,119 @@ +.pysssss-model-info { + color: white; + font-family: sans-serif; + max-width: 90vw; +} +.pysssss-model-content { + display: flex; + flex-direction: column; + overflow: hidden; +} +.pysssss-model-info h2 { + text-align: center; + margin: 0 0 10px 0; +} +.pysssss-model-info p { + margin: 5px 0; +} +.pysssss-model-info a { + color: dodgerblue; +} +.pysssss-model-info a:hover { + text-decoration: underline; +} +.pysssss-model-tags-list { + display: flex; + flex-wrap: wrap; + list-style: none; + gap: 10px; + max-height: 200px; + overflow: auto; + margin: 10px 0; + padding: 0; +} +.pysssss-model-tag { + background-color: rgb(128, 213, 247); + color: #000; + display: flex; + align-items: center; + gap: 5px; + border-radius: 5px; + padding: 2px 5px; + cursor: pointer; +} +.pysssss-model-tag--selected span::before { + content: "✅"; + position: absolute; + background-color: dodgerblue; + left: 0; + top: 0; + right: 0; + bottom: 0; + text-align: center; +} +.pysssss-model-tag:hover { + outline: 2px solid dodgerblue; +} +.pysssss-model-tag p { + margin: 0; +} +.pysssss-model-tag span { + text-align: center; + border-radius: 5px; + background-color: dodgerblue; + color: #fff; + padding: 2px; + position: relative; + min-width: 20px; + overflow: hidden; +} + +.pysssss-model-metadata .comfy-modal-content { + max-width: 100%; +} +.pysssss-model-metadata label { + margin-right: 1ch; + color: #ccc; +} + +.pysssss-model-metadata span { + color: dodgerblue; +} + +.pysssss-preview { + max-width: 50%; + margin-left: 10px; + position: relative; +} +.pysssss-preview img { + max-height: 300px; +} +.pysssss-preview button { + position: absolute; + font-size: 12px; + bottom: 10px; + right: 10px; +} +.pysssss-preview button+button { + bottom: 34px; +} + +.pysssss-preview button.pysssss-preview-nav { + bottom: unset; + right: 30px; + top: 10px; + font-size: 14px; + line-height: 14px; +} + +.pysssss-preview button.pysssss-preview-nav+.pysssss-preview-nav { + right: 10px; +} +.pysssss-model-notes { + background-color: rgba(0, 0, 0, 0.25); + padding: 5px; + margin-top: 5px; +} +.pysssss-model-notes:empty { + display: none; +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js new file mode 100644 index 0000000000000000000000000000000000000000..63770aa7c97f4acab545bc34c6686d52d3a26cfe --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js @@ -0,0 +1,358 @@ +import { $el, ComfyDialog } from "../../../../scripts/ui.js"; +import { api } from "../../../../scripts/api.js"; +import { addStylesheet 
} from "./utils.js"; + +addStylesheet(import.meta.url); + +class MetadataDialog extends ComfyDialog { + constructor() { + super(); + + this.element.classList.add("pysssss-model-metadata"); + } + show(metadata) { + super.show( + $el( + "div", + Object.keys(metadata).map((k) => + $el("div", [ + $el("label", { textContent: k }), + $el("span", { textContent: typeof metadata[k] === "object" ? JSON.stringify(metadata[k]) : metadata[k] }), + ]) + ) + ) + ); + } +} + +export class ModelInfoDialog extends ComfyDialog { + constructor(name, node) { + super(); + this.name = name; + this.node = node; + this.element.classList.add("pysssss-model-info"); + } + + get customNotes() { + return this.metadata["pysssss.notes"]; + } + + set customNotes(v) { + this.metadata["pysssss.notes"] = v; + } + + get hash() { + return this.metadata["pysssss.sha256"]; + } + + async show(type, value) { + this.type = type; + + const req = api.fetchApi("/pysssss/metadata/" + encodeURIComponent(`${type}/${value}`)); + this.info = $el("div", { style: { flex: "auto" } }); + this.img = $el("img", { style: { display: "none" } }); + this.imgWrapper = $el("div.pysssss-preview", [this.img]); + this.main = $el("main", { style: { display: "flex" } }, [this.info, this.imgWrapper]); + this.content = $el("div.pysssss-model-content", [$el("h2", { textContent: this.name }), this.main]); + + const loading = $el("div", { textContent: "ℹ️ Loading...", parent: this.content }); + + super.show(this.content); + + this.metadata = await (await req).json(); + this.viewMetadata.style.cursor = this.viewMetadata.style.opacity = ""; + this.viewMetadata.removeAttribute("disabled"); + + loading.remove(); + this.addInfo(); + } + + createButtons() { + const btns = super.createButtons(); + this.viewMetadata = $el("button", { + type: "button", + textContent: "View raw metadata", + disabled: "disabled", + style: { + opacity: 0.5, + cursor: "not-allowed", + }, + onclick: (e) => { + if (this.metadata) { + new MetadataDialog().show(this.metadata); + } + }, + }); + + btns.unshift(this.viewMetadata); + return btns; + } + + getNoteInfo() { + function parseNote() { + if (!this.customNotes) return []; + + let notes = []; + // Extract links from notes + const r = new RegExp("(\\bhttps?:\\/\\/[^\\s]+)", "g"); + let end = 0; + let m; + do { + m = r.exec(this.customNotes); + let pos; + let fin = 0; + if (m) { + pos = m.index; + fin = m.index + m[0].length; + } else { + pos = this.customNotes.length; + } + + let pre = this.customNotes.substring(end, pos); + if (pre) { + pre = pre.replaceAll("\n", "
<br>
"); + notes.push( + $el("span", { + innerHTML: pre, + }) + ); + } + if (m) { + notes.push( + $el("a", { + href: m[0], + textContent: m[0], + target: "_blank", + }) + ); + } + + end = fin; + } while (m); + return notes; + } + + let textarea; + let notesContainer; + const editText = "✏️ Edit"; + const edit = $el("a", { + textContent: editText, + href: "#", + style: { + float: "right", + color: "greenyellow", + textDecoration: "none", + }, + onclick: async (e) => { + e.preventDefault(); + + if (textarea) { + this.customNotes = textarea.value; + + const resp = await api.fetchApi("/pysssss/metadata/notes/" + encodeURIComponent(`${this.type}/${this.name}`), { + method: "POST", + body: this.customNotes, + }); + + if (resp.status !== 200) { + console.error(resp); + alert(`Error saving notes (${req.status}) ${req.statusText}`); + return; + } + + e.target.textContent = editText; + textarea.remove(); + textarea = null; + + notesContainer.replaceChildren(...parseNote.call(this)); + this.node?.["pysssss.updateExamples"]?.(); + } else { + e.target.textContent = "💾 Save"; + textarea = $el("textarea", { + style: { + width: "100%", + minWidth: "200px", + minHeight: "50px", + }, + textContent: this.customNotes, + }); + e.target.after(textarea); + notesContainer.replaceChildren(); + textarea.style.height = Math.min(textarea.scrollHeight, 300) + "px"; + } + }, + }); + + notesContainer = $el("div.pysssss-model-notes", parseNote.call(this)); + return $el( + "div", + { + style: { display: "contents" }, + }, + [edit, notesContainer] + ); + } + + addInfo() { + const usageHint = this.metadata["modelspec.usage_hint"]; + if (usageHint) { + this.addInfoEntry("Usage Hint", usageHint); + } + this.addInfoEntry("Notes", this.getNoteInfo()); + } + + addInfoEntry(name, value) { + return $el( + "p", + { + parent: this.info, + }, + [ + typeof name === "string" ? $el("label", { textContent: name + ": " }) : name, + typeof value === "string" ? $el("span", { textContent: value }) : value, + ] + ); + } + + async getCivitaiDetails() { + const req = await fetch("https://civitai.com/api/v1/model-versions/by-hash/" + this.hash); + if (req.status === 200) { + return await req.json(); + } else if (req.status === 404) { + throw new Error("Model not found"); + } else { + throw new Error(`Error loading info (${req.status}) ${req.statusText}`); + } + } + + addCivitaiInfo() { + const promise = this.getCivitaiDetails(); + const content = $el("span", { textContent: "ℹ️ Loading..." 
}); + + this.addInfoEntry( + $el("label", [ + $el("img", { + style: { + width: "18px", + position: "relative", + top: "3px", + margin: "0 5px 0 0", + }, + src: "https://civitai.com/favicon.ico", + }), + $el("span", { textContent: "Civitai: " }), + ]), + content + ); + + return promise + .then((info) => { + content.replaceChildren( + $el("a", { + href: "https://civitai.com/models/" + info.modelId, + textContent: "View " + info.model.name, + target: "_blank", + }) + ); + + const allPreviews = info.images?.filter((i) => i.type === "image"); + const previews = allPreviews?.filter((i) => i.nsfwLevel <= ModelInfoDialog.nsfwLevel); + if (previews?.length) { + let previewIndex = 0; + let preview; + const updatePreview = () => { + preview = previews[previewIndex]; + this.img.src = preview.url; + }; + + updatePreview(); + this.img.style.display = ""; + + this.img.title = `${previews.length} previews.`; + if (allPreviews.length !== previews.length) { + this.img.title += ` ${allPreviews.length - previews.length} images hidden due to NSFW level.`; + } + + this.imgSave = $el("button", { + textContent: "Use as preview", + parent: this.imgWrapper, + onclick: async () => { + // Convert the preview to a blob + const blob = await (await fetch(this.img.src)).blob(); + + // Store it in temp + const name = "temp_preview." + new URL(this.img.src).pathname.split(".")[1]; + const body = new FormData(); + body.append("image", new File([blob], name)); + body.append("overwrite", "true"); + body.append("type", "temp"); + + const resp = await api.fetchApi("/upload/image", { + method: "POST", + body, + }); + + if (resp.status !== 200) { + console.error(resp); + alert(`Error saving preview (${resp.status}) ${resp.statusText}`); + return; + } + + // Use as preview + await api.fetchApi("/pysssss/save/" + encodeURIComponent(`${this.type}/${this.name}`), { + method: "POST", + body: JSON.stringify({ + filename: name, + type: "temp", + }), + headers: { + "content-type": "application/json", + }, + }); + app.refreshComboInNodes(); + }, + }); + + $el("button", { + textContent: "Show metadata", + parent: this.imgWrapper, + onclick: async () => { + if (preview.meta && Object.keys(preview.meta).length) { + new MetadataDialog().show(preview.meta); + } else { + alert("No image metadata found"); + } + }, + }); + + const addNavButton = (icon, direction) => { + $el("button.pysssss-preview-nav", { + textContent: icon, + parent: this.imgWrapper, + onclick: async () => { + previewIndex += direction; + if (previewIndex < 0) { + previewIndex = previews.length - 1; + } else if (previewIndex >= previews.length) { + previewIndex = 0; + } + updatePreview(); + }, + }); + }; + + if (previews.length > 1) { + addNavButton("‹", -1); + addNavButton("›", 1); + } + } else if (info.images?.length) { + $el("span", { style: { opacity: 0.6 }, textContent: "⚠️ All images hidden due to NSFW level setting.", parent: this.imgWrapper }); + } + + return info; + }) + .catch((err) => { + content.textContent = "⚠️ " + err.message; + }); + } +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css new file mode 100644 index 0000000000000000000000000000000000000000..05328d123a9fb55c14fd8cb91653e931bbe69b0a --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css @@ -0,0 +1,35 @@ +.pysssss-lds-ring { + display: inline-block; + position: absolute; + width: 80px; + height: 80px; +} +.pysssss-lds-ring div { + box-sizing: border-box; + display:
block; + position: absolute; + width: 64px; + height: 64px; + margin: 8px; + border: 5px solid #fff; + border-radius: 50%; + animation: lds-ring 1.2s cubic-bezier(0.5, 0, 0.5, 1) infinite; + border-color: #fff transparent transparent transparent; +} +.pysssss-lds-ring div:nth-child(1) { + animation-delay: -0.45s; +} +.pysssss-lds-ring div:nth-child(2) { + animation-delay: -0.3s; +} +.pysssss-lds-ring div:nth-child(3) { + animation-delay: -0.15s; +} +@keyframes lds-ring { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cdad662229572e542203d08952fec4392e --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js @@ -0,0 +1,9 @@ +import { addStylesheet } from "./utils.js"; + +addStylesheet(import.meta.url); + +export function createSpinner() { + const div = document.createElement("div"); + div.innerHTML = `
<div class="pysssss-lds-ring"><div></div><div></div><div></div><div></div></div>
`; + return div.firstElementChild; +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js new file mode 100644 index 0000000000000000000000000000000000000000..e4ae9218f8c38a28d7a183f54b8b65b02871c3e3 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js @@ -0,0 +1,30 @@ +import { $el } from "../../../../scripts/ui.js"; + +export function addStylesheet(url) { + if (url.endsWith(".js")) { + url = url.substr(0, url.length - 2) + "css"; + } + $el("link", { + parent: document.head, + rel: "stylesheet", + type: "text/css", + href: url.startsWith("http") ? url : getUrl(url), + }); +} + +export function getUrl(path, baseUrl) { + if (baseUrl) { + return new URL(path, baseUrl).toString(); + } else { + return new URL("../" + path, import.meta.url).toString(); + } +} + +export async function loadImage(url) { + return new Promise((res, rej) => { + const img = new Image(); + img.onload = res; + img.onerror = rej; + img.src = url; + }); +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js new file mode 100644 index 0000000000000000000000000000000000000000..fd6fde8f36a90afa0eef8d324a3216f909c62201 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js @@ -0,0 +1,90 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.ContextMenuHook", + init() { + const getOrSet = (target, name, create) => { + if (name in target) return target[name]; + return (target[name] = create()); + }; + const symbol = getOrSet(window, "__pysssss__", () => Symbol("__pysssss__")); + const store = getOrSet(window, symbol, () => ({})); + const contextMenuHook = getOrSet(store, "contextMenuHook", () => ({})); + for (const e of ["ctor", "preAddItem", "addItem"]) { + if (!contextMenuHook[e]) { + contextMenuHook[e] = []; + } + } + + // Big ol' hack to allow customizing the context menu + // Replace the addItem function with our own that wraps the context of "this" with a proxy + // That proxy then replaces the constructor with another proxy + // That proxy then calls the custom ContextMenu that supports filters + const ctorProxy = new Proxy(LiteGraph.ContextMenu, { + construct(target, args) { + return new LiteGraph.ContextMenu(...args); + }, + }); + + function triggerCallbacks(name, getArgs, handler) { + const callbacks = contextMenuHook[name]; + if (callbacks && callbacks instanceof Array) { + for (const cb of callbacks) { + const r = cb(...getArgs()); + handler?.call(this, r); + } + } else { + console.warn("[pysssss 🐍]", `invalid ${name} callbacks`, callbacks, name in contextMenuHook); + } + } + + const addItem = LiteGraph.ContextMenu.prototype.addItem; + LiteGraph.ContextMenu.prototype.addItem = function () { + const proxy = new Proxy(this, { + get(target, prop) { + if (prop === "constructor") { + return ctorProxy; + } + return target[prop]; + }, + }); + proxy.__target__ = this; + + let el; + let args = arguments; + triggerCallbacks( + "preAddItem", + () => [el, this, args], + (r) => { + if (r !== undefined) el = r; + } + ); + + if (el === undefined) { + el = addItem.apply(proxy, arguments); + } + + triggerCallbacks( + "addItem", + () => [el, this, args], + (r) => { + if (r !== undefined) el = r; + } + ); + return el; + }; + + // We also need to patch the ContextMenu constructor to unwrap the parent else it fails a LiteGraph type
check + const ctxMenu = LiteGraph.ContextMenu; + LiteGraph.ContextMenu = function (values, options) { + if (options?.parentMenu) { + if (options.parentMenu.__target__) { + options.parentMenu = options.parentMenu.__target__; + } + } + + triggerCallbacks("ctor", () => [values, options]); + ctxMenu.call(this, values, options); + }; + LiteGraph.ContextMenu.prototype = ctxMenu.prototype; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js new file mode 100644 index 0000000000000000000000000000000000000000..75b3723a65428818c2d2f807bd396585317a2ab7 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js @@ -0,0 +1,98 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; + +const colorShade = (col, amt) => { + col = col.replace(/^#/, ""); + if (col.length === 3) col = col[0] + col[0] + col[1] + col[1] + col[2] + col[2]; + + let [r, g, b] = col.match(/.{2}/g); + [r, g, b] = [parseInt(r, 16) + amt, parseInt(g, 16) + amt, parseInt(b, 16) + amt]; + + r = Math.max(Math.min(255, r), 0).toString(16); + g = Math.max(Math.min(255, g), 0).toString(16); + b = Math.max(Math.min(255, b), 0).toString(16); + + const rr = (r.length < 2 ? "0" : "") + r; + const gg = (g.length < 2 ? "0" : "") + g; + const bb = (b.length < 2 ? "0" : "") + b; + + return `#${rr}${gg}${bb}`; +}; + +app.registerExtension({ + name: "pysssss.CustomColors", + setup() { + let picker; + let activeNode; + const onMenuNodeColors = LGraphCanvas.onMenuNodeColors; + LGraphCanvas.onMenuNodeColors = function (value, options, e, menu, node) { + const r = onMenuNodeColors.apply(this, arguments); + requestAnimationFrame(() => { + const menus = document.querySelectorAll(".litecontextmenu"); + for (let i = menus.length - 1; i >= 0; i--) { + if (menus[i].firstElementChild.textContent.includes("No color") || menus[i].firstElementChild.value?.content?.includes("No color")) { + $el( + "div.litemenu-entry.submenu", + { + parent: menus[i], + $: (el) => { + el.onclick = () => { + LiteGraph.closeAllContextMenus(); + if (!picker) { + picker = $el("input", { + type: "color", + parent: document.body, + style: { + display: "none", + }, + }); + picker.onchange = () => { + if (activeNode) { + const fApplyColor = function(node){ + if (picker.value) { + if (node.constructor === LiteGraph.LGraphGroup) { + node.color = picker.value; + } else { + node.color = colorShade(picker.value, 20); + node.bgcolor = picker.value; + } + } + } + const graphcanvas = LGraphCanvas.active_canvas; + if (!graphcanvas.selected_nodes || Object.keys(graphcanvas.selected_nodes).length <= 1){ + fApplyColor(activeNode); + } else { + for (let i in graphcanvas.selected_nodes) { + fApplyColor(graphcanvas.selected_nodes[i]); + } + } + + activeNode.setDirtyCanvas(true, true); + } + }; + } + activeNode = null; + picker.value = node.bgcolor; + activeNode = node; + picker.click(); + }; + }, + }, + [ + $el("span", { + style: { + paddingLeft: "4px", + display: "block", + }, + textContent: "🎨 Custom", + }), + ] + ); + break; + } + } + }); + return r; + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js new file mode 100644 index 0000000000000000000000000000000000000000..0b1a3dd475c9305110f0ae8e4b3dcc6dd56e445e --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js @@ -0,0 +1,58 @@ 
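+// Probes for optional user-supplied favicon overrides (favicon.user.ico / favicon-active.user.ico) via a HEAD request, falling back to the bundled icons.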
+import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; + +// Simple script that adds the current queue size to the window title +// Adds a favicon that changes color while active + +app.registerExtension({ + name: "pysssss.FaviconStatus", + async setup() { + let link = document.querySelector("link[rel~='icon']"); + if (!link) { + link = document.createElement("link"); + link.rel = "icon"; + document.head.appendChild(link); + } + + const getUrl = (active, user) => new URL(`assets/favicon${active ? "-active" : ""}${user ? ".user" : ""}.ico`, import.meta.url); + const testUrl = async (active) => { + const url = getUrl(active, true); + const r = await fetch(url, { + method: "HEAD", + }); + if (r.status === 200) { + return url; + } + return getUrl(active, false); + }; + const activeUrl = await testUrl(true); + const idleUrl = await testUrl(false); + + let executing = false; + const update = () => (link.href = executing ? activeUrl : idleUrl); + + for (const e of ["execution_start", "progress"]) { + api.addEventListener(e, () => { + executing = true; + update(); + }); + } + + api.addEventListener("executing", ({ detail }) => { + // null will be sent when it's finished + executing = !!detail; + update(); + }); + + api.addEventListener("status", ({ detail }) => { + let title = "ComfyUI"; + if (detail && detail.exec_info.queue_remaining) { + title = `(${detail.exec_info.queue_remaining}) ${title}`; + } + document.title = title; + update(); + executing = false; + }); + update(); + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js new file mode 100644 index 0000000000000000000000000000000000000000..d5768f8d0d64fd8eb84fa54c2dc2c26ae46c2422 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js @@ -0,0 +1,91 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "pysssss.GraphArrange", + setup(app) { + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + options.push({ content: "Arrange (float left)", callback: () => graph.arrange() }); + options.push({ + content: "Arrange (float right)", + callback: () => { + (function () { + var margin = 50; + var layout; + + const nodes = this.computeExecutionOrder(false, true); + const columns = []; + + // Find node first use + for (let i = nodes.length - 1; i >= 0; i--) { + const node = nodes[i]; + let max = null; + for (const out of node.outputs || []) { + if (out.links) { + for (const link of out.links) { + const outNode = app.graph.getNodeById(app.graph.links[link].target_id); + if (!outNode) continue; + var l = outNode._level - 1; + if (max === null) max = l; + else if (l < max) max = l; + } + } + } + if (max != null) node._level = max; + } + + for (let i = 0; i < nodes.length; ++i) { + const node = nodes[i]; + const col = node._level || 1; + if (!columns[col]) { + columns[col] = []; + } + columns[col].push(node); + } + + let x = margin; + + for (let i = 0; i < columns.length; ++i) { + const column = columns[i]; + if (!column) { + continue; + } + column.sort((a, b) => { + var as = !(a.type === "SaveImage" || a.type === "PreviewImage"); + var bs = !(b.type === "SaveImage" || b.type === "PreviewImage"); + var r = as - bs; + if (r === 0) r = (a.inputs?.length || 0) - (b.inputs?.length || 0); + if (r === 0) r = (a.outputs?.length || 0) - 
(b.outputs?.length || 0); + return r; + }); + let max_size = 100; + let y = margin + LiteGraph.NODE_TITLE_HEIGHT; + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] = layout == LiteGraph.VERTICAL_LAYOUT ? y : x; + node.pos[1] = layout == LiteGraph.VERTICAL_LAYOUT ? x : y; + const max_size_index = layout == LiteGraph.VERTICAL_LAYOUT ? 1 : 0; + if (node.size[max_size_index] > max_size) { + max_size = node.size[max_size_index]; + } + const node_size_index = layout == LiteGraph.VERTICAL_LAYOUT ? 0 : 1; + y += node.size[node_size_index] + margin + LiteGraph.NODE_TITLE_HEIGHT + j; + } + + // Right align in column + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] += max_size - node.size[0]; + } + x += max_size + margin; + } + + this.setDirtyCanvas(true, true); + }).apply(app.graph); + }, + }); + return options; + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js new file mode 100644 index 0000000000000000000000000000000000000000..3a63a34e437a4b935ce4d0898f96c448d36a9c67 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js @@ -0,0 +1,575 @@ +import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; +import { lightbox } from "./common/lightbox.js"; + +$el("style", { + textContent: ` + .pysssss-image-feed { + position: absolute; + background: var(--comfy-menu-bg); + color: var(--fg-color); + z-index: 99; + font-family: sans-serif; + font-size: 12px; + display: flex; + flex-direction: column; + } + div > .pysssss-image-feed { + position: static; + } + .pysssss-image-feed--top, .pysssss-image-feed--bottom { + width: 100vw; + min-height: 30px; + max-height: calc(var(--max-size, 20) * 1vh); + } + .pysssss-image-feed--top { + top: 0; + } + .pysssss-image-feed--bottom { + bottom: 0; + flex-direction: column-reverse; + padding-top: 5px; + } + .pysssss-image-feed--left, .pysssss-image-feed--right { + top: 0; + height: 100vh; + min-width: 200px; + max-width: calc(var(--max-size, 10) * 1vw); + } + .comfyui-body-left .pysssss-image-feed--left, .comfyui-body-right .pysssss-image-feed--right { + height: 100%; + } + .pysssss-image-feed--left { + left: 0; + } + .pysssss-image-feed--right { + right: 0; + } + + .pysssss-image-feed--left .pysssss-image-feed-menu, .pysssss-image-feed--right .pysssss-image-feed-menu { + flex-direction: column; + } + + .pysssss-image-feed-menu { + position: relative; + flex: 0 1 min-content; + display: flex; + gap: 5px; + padding: 5px; + justify-content: space-between; + } + .pysssss-image-feed-btn-group { + align-items: stretch; + display: flex; + gap: .5rem; + flex: 0 1 fit-content; + justify-content: flex-end; + } + .pysssss-image-feed-btn { + background-color:var(--comfy-input-bg); + border-radius:5px; + border:2px solid var(--border-color); + color: var(--fg-color); + cursor:pointer; + display:inline-block; + flex: 0 1 fit-content; + text-decoration:none; + } + .pysssss-image-feed-btn.sizing-btn:checked { + filter: invert(); + } + .pysssss-image-feed-btn.clear-btn { + padding: 5px 20px; + } + .pysssss-image-feed-btn.hide-btn { + padding: 5px; + aspect-ratio: 1 / 1; + } + .pysssss-image-feed-btn:hover { + filter: brightness(1.2); + } + .pysssss-image-feed-btn:active { + position:relative; + top:1px; + } + + .pysssss-image-feed-menu section { + border-radius: 5px; + background: rgba(0,0,0,0.6); + 
padding: 0 5px; + display: flex; + gap: 5px; + align-items: center; + position: relative; + } + .pysssss-image-feed-menu section span { + white-space: nowrap; + } + .pysssss-image-feed-menu section input { + flex: 1 1 100%; + background: rgba(0,0,0,0.6); + border-radius: 5px; + overflow: hidden; + z-index: 100; + } + + .sizing-menu { + position: relative; + } + + .size-controls-flyout { + position: absolute; + transform: scaleX(0%); + transition: 200ms ease-out; + transition-delay: 500ms; + z-index: 101; + width: 300px; + } + + .sizing-menu:hover .size-controls-flyout { + transform: scale(1, 1); + transition: 200ms linear; + transition-delay: 0; + } + .pysssss-image-feed--bottom .size-controls-flyout { + transform: scale(1,0); + transform-origin: bottom; + bottom: 0; + left: 0; + } + .pysssss-image-feed--top .size-controls-flyout { + transform: scale(1,0); + transform-origin: top; + top: 0; + left: 0; + } + .pysssss-image-feed--left .size-controls-flyout { + transform: scale(0, 1); + transform-origin: left; + top: 0; + left: 0; + } + .pysssss-image-feed--right .size-controls-flyout { + transform: scale(0, 1); + transform-origin: right; + top: 0; + right: 0; + } + + .pysssss-image-feed-menu > * { + min-height: 24px; + } + .pysssss-image-feed-list { + flex: 1 1 auto; + overflow-y: auto; + display: grid; + align-items: center; + justify-content: center; + gap: 4px; + grid-auto-rows: min-content; + grid-template-columns: repeat(var(--img-sz, 3), 1fr); + transition: 100ms linear; + scrollbar-gutter: stable both-edges; + padding: 5px; + background: var(--comfy-input-bg); + border-radius: 5px; + margin: 5px; + margin-top: 0px; + } + .pysssss-image-feed-list:empty { + display: none; + } + .pysssss-image-feed-list div { + height: 100%; + text-align: center; + } + .pysssss-image-feed-list::-webkit-scrollbar { + background: var(--comfy-input-bg); + border-radius: 5px; + } + .pysssss-image-feed-list::-webkit-scrollbar-thumb { + background:var(--comfy-menu-bg); + border: 5px solid transparent; + border-radius: 8px; + background-clip: content-box; + } + .pysssss-image-feed-list::-webkit-scrollbar-thumb:hover { + background: var(--border-color); + background-clip: content-box; + } + .pysssss-image-feed-list img { + object-fit: var(--img-fit, contain); + max-width: 100%; + max-height: calc(var(--max-size) * 1vh); + border-radius: 4px; + } + .pysssss-image-feed-list img:hover { + filter: brightness(1.2); + }`, + parent: document.body, +}); + +app.registerExtension({ + name: "pysssss.ImageFeed", + async setup() { + let visible = true; + const seenImages = new Map(); + const showButton = $el("button.comfy-settings-btn", { + textContent: "🖼️", + style: { + right: "16px", + cursor: "pointer", + display: "none", + }, + }); + let showMenuButton; + if (!app.menu?.element.style.display && app.menu?.settingsGroup) { + showMenuButton = new (await import("../../../scripts/ui/components/button.js")).ComfyButton({ + icon: "image-multiple", + action: () => showButton.click(), + tooltip: "Show Image Feed 🐍", + content: "Show Image Feed 🐍", + }); + showMenuButton.enabled = false; + app.menu.settingsGroup.append(showMenuButton); + } + + const getVal = (n, d) => { + const v = localStorage.getItem("pysssss.ImageFeed." + n); + if (v && !isNaN(+v)) { + return v; + } + return d; + }; + + const saveVal = (n, v) => { + localStorage.setItem("pysssss.ImageFeed." 
+ n, v); + }; + + const imageFeed = $el("div.pysssss-image-feed"); + const imageList = $el("div.pysssss-image-feed-list"); + + function updateMenuParent(location) { + if (showMenuButton) { + const el = document.querySelector(".comfyui-body-" + location); + if (!el) return; + el.append(imageFeed); + } else { + if (!imageFeed.parent) { + document.body.append(imageFeed); + } + } + } + + const feedLocation = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.Location", + name: "🐍 Image Feed Location", + defaultValue: "bottom", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: "🐍 Image Feed Location:", + }), + ]), + $el("td", [ + $el( + "select", + { + style: { + fontSize: "14px", + }, + oninput: (e) => { + feedLocation.value = e.target.value; + imageFeed.className = `pysssss-image-feed pysssss-image-feed--${feedLocation.value}`; + updateMenuParent(feedLocation.value); + window.dispatchEvent(new Event("resize")); + }, + }, + ["left", "top", "right", "bottom", "hidden"].map((m) => + $el("option", { + value: m, + textContent: m, + selected: feedLocation.value === m, + }) + ) + ), + ]), + ]); + }, + onChange(value) { + if (value === "hidden") { + imageFeed.remove(); + showButton.style.display = "none"; + } else { + showButton.style.display = visible ? "none" : "unset"; + imageFeed.className = `pysssss-image-feed pysssss-image-feed--${value}`; + updateMenuParent(value); + } + }, + }); + + const feedDirection = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.Direction", + name: "🐍 Image Feed Direction", + defaultValue: "newest first", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: "🐍 Image Feed Direction:", + }), + ]), + $el("td", [ + $el( + "select", + { + style: { + fontSize: "14px", + }, + oninput: (e) => { + feedDirection.value = e.target.value; + imageList.replaceChildren(...[...imageList.childNodes].reverse()); + }, + }, + ["newest first", "oldest first"].map((m) => + $el("option", { + value: m, + textContent: m, + selected: feedDirection.value === m, + }) + ) + ), + ]), + ]); + }, + }); + + const deduplicateFeed = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.Deduplication", + name: "🐍 Image Feed Deduplication", + tooltip: `Ensures unique images in the image feed but at the cost of CPU-bound performance impact \ +(from hundreds of milliseconds to seconds per image, depending on byte size). For workflows that produce duplicate images, turning this setting on may yield overall client-side performance improvements \ +by reducing the number of images in the feed. 
+ +Recommended: "enabled (max performance)" unless images are erroneously deduplicated.`, + defaultValue: 0, + type: "combo", + options: (value) => { + let dedupeOptions = {"disabled": 0, "enabled (slow)": 1, "enabled (performance)": 0.5, "enabled (max performance)": 0.25}; + return Object.entries(dedupeOptions).map(([k, v]) => ({ + value: v, + text: k, + selected: k === value, + }) + ) + }, + }); + + const maxImages = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.MaxImages", + name: "🐍 Image Feed Max Images", + tooltip: `Limits the number of images in the feed to a maximum, removing the oldest images as new ones are added.`, + defaultValue: 0, + type: "number", + }); + + const clearButton = $el("button.pysssss-image-feed-btn.clear-btn", { + textContent: "Clear", + onclick: () => { + imageList.replaceChildren(); + window.dispatchEvent(new Event("resize")); + }, + }); + + const hideButton = $el("button.pysssss-image-feed-btn.hide-btn", { + textContent: "❌", + onclick: () => { + imageFeed.style.display = "none"; + showButton.style.display = feedLocation.value === "hidden" ? "none" : "unset"; + if (showMenuButton) showMenuButton.enabled = true; + saveVal("Visible", 0); + visible = false; + window.dispatchEvent(new Event("resize")); + }, + }); + + let columnInput; + function updateColumnCount(v) { + columnInput.parentElement.title = `Controls the number of columns in the feed (${v} columns).\nClick label to set custom value.`; + imageFeed.style.setProperty("--img-sz", v); + saveVal("ImageSize", v); + columnInput.max = Math.max(10, v, columnInput.max); + columnInput.value = v; + window.dispatchEvent(new Event("resize")); + } + + function addImageToFeed(href) { + const method = feedDirection.value === "newest first" ? "prepend" : "append"; + + if (maxImages.value > 0 && imageList.children.length >= maxImages.value) { + imageList.children[method === "prepend" ?
imageList.children.length - 1 : 0].remove(); + } + + imageList[method]( + $el("div", [ + $el( + "a", + { + target: "_blank", + href, + onclick: (e) => { + const imgs = [...imageList.querySelectorAll("img")].map((img) => img.getAttribute("src")); + lightbox.show(imgs, imgs.indexOf(href)); + e.preventDefault(); + }, + }, + [$el("img", { src: href })] + ), + ]) + ); + // If lightbox is open, update it with new image + lightbox.updateWithNewImage(href, feedDirection.value); + } + + imageFeed.append( + $el("div.pysssss-image-feed-menu", [ + $el("section.sizing-menu", {}, [ + $el("label.size-control-handle", { textContent: "↹ Resize Feed" }), + $el("div.size-controls-flyout", {}, [ + $el("section.size-control.feed-size-control", {}, [ + $el("span", { + textContent: "Feed Size...", + }), + $el("input", { + type: "range", + min: 10, + max: 80, + oninput: (e) => { + e.target.parentElement.title = `Controls the maximum size of the image feed panel (${e.target.value}vh)`; + imageFeed.style.setProperty("--max-size", e.target.value); + saveVal("FeedSize", e.target.value); + window.dispatchEvent(new Event("resize")); + }, + $: (el) => { + requestAnimationFrame(() => { + el.value = getVal("FeedSize", 25); + el.oninput({ target: el }); + }); + }, + }), + ]), + $el("section.size-control.image-size-control", {}, [ + $el("a", { + textContent: "Column count...", + style: { + cursor: "pointer", + textDecoration: "underline", + }, + onclick: () => { + const v = +prompt("Enter custom column count", 20); + if (!isNaN(v)) { + updateColumnCount(v); + } + }, + }), + $el("input", { + type: "range", + min: 1, + max: 10, + step: 1, + oninput: (e) => { + updateColumnCount(e.target.value); + }, + $: (el) => { + columnInput = el; + requestAnimationFrame(() => { + updateColumnCount(getVal("ImageSize", 4)); + }); + }, + }), + ]), + ]), + ]), + $el("div.pysssss-image-feed-btn-group", {}, [clearButton, hideButton]), + ]), + imageList + ); + showButton.onclick = () => { + imageFeed.style.display = "flex"; + showButton.style.display = "none"; + if (showMenuButton) showMenuButton.enabled = false; + + saveVal("Visible", 1); + visible = true; + window.dispatchEvent(new Event("resize")); + }; + document.querySelector(".comfy-settings-btn").after(showButton); + window.dispatchEvent(new Event("resize")); + + if (!+getVal("Visible", 1)) { + hideButton.onclick(); + } + + api.addEventListener("executed", ({ detail }) => { + if (visible && detail?.output?.images) { + if (detail.node?.includes?.(":")) { + // Ignore group nodes + const n = app.graph.getNodeById(detail.node.split(":")[0]); + if (n?.getInnerNodes) return; + } + + for (const src of detail.output.images) { + const href = `./view?filename=${encodeURIComponent(src.filename)}&type=${src.type}& + subfolder=${encodeURIComponent(src.subfolder)}&t=${+new Date()}`; + + // deduplicateFeed.value is essentially the scaling factor used for image hashing + // but when deduplication is disabled, this value is "0" + if (deduplicateFeed.value > 0) { + // deduplicate by ignoring images with the same filename/type/subfolder + const fingerprint = JSON.stringify({ filename: src.filename, type: src.type, subfolder: src.subfolder }); + if (seenImages.has(fingerprint)) { + // NOOP: image is a duplicate + } else { + seenImages.set(fingerprint, true); + let img = $el("img", { src: href }) + img.onerror = () => { + // fall back to default behavior + addImageToFeed(href); + } + img.onload = () => { + // redraw the image onto a canvas to strip metadata (resize if performance mode) + let imgCanvas = 
document.createElement("canvas"); + let imgScalar = deduplicateFeed.value; + imgCanvas.width = imgScalar * img.width; + imgCanvas.height = imgScalar * img.height; + + let imgContext = imgCanvas.getContext("2d"); + imgContext.drawImage(img, 0, 0, imgCanvas.width, imgCanvas.height); + const data = imgContext.getImageData(0, 0, imgCanvas.width, imgCanvas.height); + + // calculate a fast rolling hash of the image data (hash = 31 * hash + byte) + let hash = 0; + for (const b of data.data) { + hash = ((hash << 5) - hash) + b; + } + + // add image to feed if we've never seen the hash before + if (seenImages.has(hash)) { + // NOOP: image is a duplicate + } else { + // if we got to here, then the image is unique--so add to feed + seenImages.set(hash, true); + addImageToFeed(href); + } + } + } + } else { + addImageToFeed(href); + } + } + } + }); + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js new file mode 100644 index 0000000000000000000000000000000000000000..d52fcff5d3fe9e6f55493de5edb6ce6e9c1dcbfc --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js @@ -0,0 +1,54 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.KSamplerAdvDenoise", + async beforeRegisterNodeDef(nodeType) { + // Adds a "Set Denoise" menu option that derives steps/start_at_step/end_at_step from a target denoise + const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = origGetExtraMenuOptions?.apply?.(this, arguments); + + let stepsWidget = null; + let startAtWidget = null; + let endAtWidget = null; + for (const w of this.widgets || []) { + if (w.name === "steps") { + stepsWidget = w; + } else if (w.name === "start_at_step") { + startAtWidget = w; + } else if (w.name === "end_at_step") { + endAtWidget = w; + } + } + + if (stepsWidget && startAtWidget && endAtWidget) { + options.push( + { + content: "Set Denoise", + callback: () => { + const steps = +prompt("How many steps do you want?", 15); + if (isNaN(steps)) { + return; + } + const denoise = +prompt("How much denoise?
(0-1)", 0.5); + if (isNaN(denoise)) { + return; + } + + stepsWidget.value = Math.floor(steps / Math.max(0, Math.min(1, denoise))); + stepsWidget.callback?.(stepsWidget.value); + + startAtWidget.value = stepsWidget.value - steps; + startAtWidget.callback?.(startAtWidget.value); + + endAtWidget.value = stepsWidget.value; + endAtWidget.callback?.(endAtWidget.value); + }, + }, + null + ); + } + + return r; + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js new file mode 100644 index 0000000000000000000000000000000000000000..1305f1c1c08a71990ac1b8c2615afc4992a61823 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js @@ -0,0 +1,57 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; + +const id = "pysssss.LinkRenderMode"; +const ext = { + name: id, + async setup(app) { + if (app.extensions.find((ext) => ext.name === "Comfy.LinkRenderMode")) { + console.log("%c[🐍 pysssss]", "color: limegreen", "Skipping LinkRenderMode as core extension found"); + return; + } + const setting = app.ui.settings.addSetting({ + id, + name: "🐍 Link Render Mode", + defaultValue: 2, + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: "🐍 Link Render Mode:", + }), + ]), + $el("td", [ + $el( + "select", + { + textContent: "Manage", + style: { + fontSize: "14px", + }, + oninput: (e) => { + setting.value = e.target.value; + app.canvas.links_render_mode = +e.target.value; + app.graph.setDirtyCanvas(true, true); + }, + }, + LiteGraph.LINK_RENDER_MODES.map((m, i) => + $el("option", { + value: i, + textContent: m, + selected: i == app.canvas.links_render_mode, + }) + ) + ), + ]), + ]); + }, + onChange(value) { + app.canvas.links_render_mode = +value; + app.graph.setDirtyCanvas(true); + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js new file mode 100644 index 0000000000000000000000000000000000000000..3a924964e75f1e428fde425f534f4ff7dfe92cd0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js @@ -0,0 +1,185 @@ +import { app } from "../../../scripts/app.js"; + +// Adds lock/unlock menu item for nodes + groups to prevent moving / resizing them + +const LOCKED = Symbol(); + +function lockArray(arr, isLocked) { + const v = []; + + for (let i = 0; i < 2; i++) { + v[i] = arr[i]; + + Object.defineProperty(arr, i, { + get() { + return v[i]; + }, + set(value) { + if (!isLocked()) { + v[i] = value; + } + }, + }); + } +} + +app.registerExtension({ + name: "pysssss.Locking", + init() { + function lockGroup(node) { + node[LOCKED] = true; + } + + // Add the locked flag to serialization + const serialize = LGraphGroup.prototype.serialize; + LGraphGroup.prototype.serialize = function () { + const o = serialize.apply(this, arguments); + o.locked = !!this[LOCKED]; + return o; + }; + + // On initial configure lock group if required + const configure = LGraphGroup.prototype.configure; + LGraphGroup.prototype.configure = function (o) { + configure.apply(this, arguments); + if (o.locked) { + lockGroup(this); + } + }; + + // Allow click through locked groups + const getGroupOnPos = LGraph.prototype.getGroupOnPos; + LGraph.prototype.getGroupOnPos = function () { + const r = getGroupOnPos.apply(this, arguments); + if (r && 
r[LOCKED] && !new Error().stack.includes("processContextMenu")) return null; + return r; + }; + + // Add menu options for lock/unlock + const getGroupMenuOptions = LGraphCanvas.prototype.getGroupMenuOptions; + LGraphCanvas.prototype.getGroupMenuOptions = function (node) { + const opts = getGroupMenuOptions.apply(this, arguments); + + opts.unshift( + node[LOCKED] + ? { + content: "Unlock", + callback: () => { + delete node[LOCKED]; + }, + } + : { + content: "Lock", + callback: () => lockGroup(node), + }, + null + ); + + return opts; + }; + }, + setup() { + const drawNodeShape = LGraphCanvas.prototype.drawNodeShape; + LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) { + const res = drawNodeShape.apply(this, arguments); + + if (node[LOCKED]) { + ctx.fillText("🔒", node.getBounding()[2] - 20, -10); + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType) { + const nodesArray = (nodes) => { + if (nodes) { + if (nodes instanceof Array) { + return nodes; + } + return [nodes]; + } + return Object.values(app.canvas.selected_nodes); + }; + function unlockNode(nodes) { + nodes = nodesArray(nodes); + for (const node of nodes) { + delete node[LOCKED]; + } + app.graph.setDirtyCanvas(true, false); + } + function lockNode(nodes) { + nodes = nodesArray(nodes); + for (const node of nodes) { + if (node[LOCKED]) continue; + + node[LOCKED] = true; + // Same hack as above + lockArray(node.pos, () => !!node[LOCKED]); + + // Size is set by both replacing the value and setting individual values + // So define a new property that can prevent reassignment + const sz = [node.size[0], node.size[1]]; + Object.defineProperty(node, "size", { + get() { + return sz; + }, + set(value) { + if (!node[LOCKED]) { + sz[0] = value[0]; + sz[1] = value[1]; + } + }, + }); + // And then lock each element if required + lockArray(sz, () => !!node[LOCKED]); + } + + app.graph.setDirtyCanvas(true, false); + } + + // Add menu options for lock/unlock + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = getExtraMenuOptions ? getExtraMenuOptions.apply(this, arguments) : undefined; + + options.splice( + options.findIndex((o) => o?.content === "Properties") + 1, + 0, + null, + this[LOCKED] + ? 
{ + content: "Unlock", + callback: () => { + unlockNode(); + }, + } + : { + content: "Lock", + callback: () => lockNode(), + } + ); + + return r; + }; + + // Add the locked flag to serialization + const onSerialize = nodeType.prototype.onSerialize; + nodeType.prototype.onSerialize = function (o) { + if (onSerialize) { + onSerialize.apply(this, arguments); + } + o.locked = this[LOCKED]; + }; + + // On initial configure lock node if required + const onConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function (o) { + if (onConfigure) { + onConfigure.apply(this, arguments); + } + if (o.locked) { + lockNode(this); + } + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js new file mode 100644 index 0000000000000000000000000000000000000000..1dc80eee95d180f5676201aabf3cda5c8ba7cf96 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js @@ -0,0 +1,44 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +app.registerExtension({ + name: "pysssss.MathExpression", + init() { + const STRING = ComfyWidgets.STRING; + ComfyWidgets.STRING = function (node, inputName, inputData) { + const r = STRING.apply(this, arguments); + r.widget.dynamicPrompts = inputData?.[1].dynamicPrompts; + return r; + }; + }, + beforeRegisterNodeDef(nodeType) { + if (nodeType.comfyClass === "MathExpression|pysssss") { + const onDrawForeground = nodeType.prototype.onDrawForeground; + + nodeType.prototype.onNodeCreated = function() { + // These are typed as any to bypass backend validation + // update frontend to restrict types + for(const input of this.inputs) { + input.type = "INT,FLOAT,IMAGE,LATENT"; + } + } + + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + + const v = app.nodeOutputs?.[this.id + ""]; + if (!this.flags.collapsed && v) { + const text = v.value[0] + ""; + ctx.save(); + ctx.font = "bold 12px sans-serif"; + ctx.fillStyle = "dodgerblue"; + const sz = ctx.measureText(text); + ctx.fillText(text, this.size[0] - sz.width - 5, LiteGraph.NODE_SLOT_HEIGHT * 3); + ctx.restore(); + } + + return r; + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js new file mode 100644 index 0000000000000000000000000000000000000000..c16672dd97f2ec42b9f7218cc6fe8e5babcbe305 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js @@ -0,0 +1,49 @@ +import { app } from "../../../scripts/app.js"; + +const id = "pysssss.MiddleClickAddDefaultNode"; +const ext = { + name: id, + async setup(app) { + app.ui.settings.addSetting({ + id, + name: "🐍 Middle click slot to add", + defaultValue: "Reroute", + type: "combo", + options: (value) => + [ + ...Object.keys(LiteGraph.registered_node_types) + .filter((k) => k.includes("Reroute")) + .sort((a, b) => { + if (a === "Reroute") return -1; + if (b === "Reroute") return 1; + return a.localeCompare(b); + }), + "[None]", + ].map((m) => ({ + value: m, + text: m, + selected: !value ? 
m === "[None]" : m === value, + })), + onChange(value) { + const enable = value && value !== "[None]"; + if (value === true) { + value = "Reroute"; + } + LiteGraph.middle_click_slot_add_default_node = enable; + if (enable) { + for (const arr of Object.values(LiteGraph.slot_types_default_in).concat( + Object.values(LiteGraph.slot_types_default_out) + )) { + const idx = arr.indexOf(value); + if (idx !== -1) { + arr.splice(idx, 1); + } + arr.unshift(value); + } + } + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js new file mode 100644 index 0000000000000000000000000000000000000000..f294cde7795433380fec10dc5c60bb4d1d9af1cb --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js @@ -0,0 +1,412 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; +import { $el } from "../../../scripts/ui.js"; +import { ModelInfoDialog } from "./common/modelInfoDialog.js"; + +const MAX_TAGS = 500; +const NsfwLevel = { + PG: 1, + PG13: 2, + R: 4, + X: 8, + XXX: 16, + Blocked: 32, +}; + +export class LoraInfoDialog extends ModelInfoDialog { + getTagFrequency() { + if (!this.metadata.ss_tag_frequency) return []; + + const datasets = JSON.parse(this.metadata.ss_tag_frequency); + const tags = {}; + for (const setName in datasets) { + const set = datasets[setName]; + for (const t in set) { + if (t in tags) { + tags[t] += set[t]; + } else { + tags[t] = set[t]; + } + } + } + + return Object.entries(tags).sort((a, b) => b[1] - a[1]); + } + + getResolutions() { + let res = []; + if (this.metadata.ss_bucket_info) { + const parsed = JSON.parse(this.metadata.ss_bucket_info); + if (parsed?.buckets) { + for (const { resolution, count } of Object.values(parsed.buckets)) { + res.push([count, `${resolution.join("x")} * ${count}`]); + } + } + } + res = res.sort((a, b) => b[0] - a[0]).map((a) => a[1]); + let r = this.metadata.ss_resolution; + if (r) { + const s = r.split(","); + const w = s[0].replace("(", ""); + const h = s[1].replace(")", ""); + res.push(`${w.trim()}x${h.trim()} (Base res)`); + } else if ((r = this.metadata["modelspec.resolution"])) { + res.push(r + " (Base res)"); + } + if (!res.length) { + res.push("⚠️ Unknown"); + } + return res; + } + + getTagList(tags) { + return tags.map((t) => + $el( + "li.pysssss-model-tag", + { + dataset: { + tag: t[0], + }, + $: (el) => { + el.onclick = () => { + el.classList.toggle("pysssss-model-tag--selected"); + }; + }, + }, + [ + $el("p", { + textContent: t[0], + }), + $el("span", { + textContent: t[1], + }), + ] + ) + ); + } + + addTags() { + let tags = this.getTagFrequency(); + if (!tags?.length) { + tags = this.metadata["modelspec.tags"]?.split(",").map((t) => [t.trim(), 1]); + } + let hasMore; + if (tags?.length) { + const c = tags.length; + let list; + if (c > MAX_TAGS) { + tags = tags.slice(0, MAX_TAGS); + hasMore = $el("p", [ + $el("span", { textContent: `⚠️ Only showing first ${MAX_TAGS} tags ` }), + $el("a", { + href: "#", + textContent: `Show all ${c}`, + onclick: () => { + list.replaceChildren(...this.getTagList(this.getTagFrequency())); + hasMore.remove(); + }, + }), + ]); + } + list = $el("ol.pysssss-model-tags-list", this.getTagList(tags)); + this.tags = $el("div", [list]); + } else { + this.tags = $el("p", { textContent: "⚠️ No tag frequency metadata found" }); + } + + this.content.append(this.tags); + + if (hasMore) { + this.content.append(hasMore); + } +
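+ // When the tag list is truncated, the "Show all" link above re-renders the complete list in place.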
} + + addExample(title, value, name) { + const textArea = $el("textarea", { + textContent: value, + style: { + whiteSpace: "pre-wrap", + margin: "10px 0", + color: "#fff", + background: "#222", + padding: "5px", + borderRadius: "5px", + maxHeight: "250px", + overflow: "auto", + display: "block", + border: "none", + width: "calc(100% - 10px)", + }, + }); + $el( + "p", + { + parent: this.content, + textContent: `${title}: `, + }, + [ + textArea, + $el("button", { + onclick: async () => { + await this.saveAsExample(textArea.value, `${name}.txt`); + }, + textContent: "Save as Example", + style: { + fontSize: "14px", + }, + }), + $el("hr"), + ] + ); + } + + async addInfo() { + this.addInfoEntry("Name", this.metadata.ss_output_name || "⚠️ Unknown"); + this.addInfoEntry("Base Model", this.metadata.ss_sd_model_name || "⚠️ Unknown"); + this.addInfoEntry("Clip Skip", this.metadata.ss_clip_skip || "⚠️ Unknown"); + + this.addInfoEntry( + "Resolution", + $el( + "select", + this.getResolutions().map((r) => $el("option", { textContent: r })) + ) + ); + + super.addInfo(); + const p = this.addCivitaiInfo(); + this.addTags(); + + const info = await p; + this.addExample("Trained Words", info?.trainedWords?.join(", ") ?? "", "trainedwords"); + + const triggerPhrase = this.metadata["modelspec.trigger_phrase"]; + if (triggerPhrase) { + this.addExample("Trigger Phrase", triggerPhrase, "triggerphrase"); + } + + $el("div", { + parent: this.content, + innerHTML: info?.description ?? this.metadata["modelspec.description"] ?? "[No description provided]", + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + + async saveAsExample(example, name = "example.txt") { + if (!example.length) { + return; + } + try { + name = prompt("Enter example name", name); + if (!name) return; + + await api.fetchApi("/pysssss/examples/" + encodeURIComponent(`${this.type}/${this.name}`), { + method: "POST", + body: JSON.stringify({ + name, + example, + }), + headers: { + "content-type": "application/json", + }, + }); + this.node?.["pysssss.updateExamples"]?.(); + alert("Saved!"); + } catch (error) { + console.error(error); + alert("Error saving: " + error); + } + } + + createButtons() { + const btns = super.createButtons(); + function tagsToCsv(tags) { + return tags.map((el) => el.dataset.tag).join(", "); + } + function copyTags(e, tags) { + const textarea = $el("textarea", { + parent: document.body, + style: { + position: "fixed", + }, + textContent: tagsToCsv(tags), + }); + textarea.select(); + try { + document.execCommand("copy"); + if (!e.target.dataset.text) { + e.target.dataset.text = e.target.textContent; + } + e.target.textContent = "Copied " + tags.length + " tags"; + setTimeout(() => { + e.target.textContent = e.target.dataset.text; + }, 1000); + } catch (ex) { + prompt("Copy to clipboard: Ctrl+C, Enter", tagsToCsv(tags)); + } finally { + document.body.removeChild(textarea); + } + } + + btns.unshift( + $el("button", { + type: "button", + textContent: "Save Selected as Example", + onclick: async (e) => { + const tags = tagsToCsv([...this.tags.querySelectorAll(".pysssss-model-tag--selected")]); + await this.saveAsExample(tags); + }, + }), + $el("button", { + type: "button", + textContent: "Copy Selected", + onclick: (e) => { + copyTags(e, [...this.tags.querySelectorAll(".pysssss-model-tag--selected")]); + }, + }), + $el("button", { + type: "button", + textContent: "Copy All", + onclick: (e) => { + copyTags(e, [...this.tags.querySelectorAll(".pysssss-model-tag")]); + }, + }) + ); + + return btns; + } +} + +class
CheckpointInfoDialog extends ModelInfoDialog { + async addInfo() { + super.addInfo(); + const info = await this.addCivitaiInfo(); + if (info) { + this.addInfoEntry("Base Model", info.baseModel || "⚠️ Unknown"); + + $el("div", { + parent: this.content, + innerHTML: info.description, + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + } +} + +const lookups = {}; + +function addInfoOption(node, type, infoClass, widgetNamePattern, opts) { + const widgets = widgetNamePattern + ? node.widgets.filter((w) => w.name === widgetNamePattern || w.name.match(`^${widgetNamePattern}$`)) + : [node.widgets[0]]; + for (const widget of widgets) { + let value = widget.value; + if (value?.content) { + value = value.content; + } + if (!value || value === "None") { + return; + } + let optName; + const split = value.split(/[.\\/]/); + optName = split[split.length - 2]; + opts.push({ + content: optName, + callback: async () => { + new infoClass(value, node).show(type, value); + }, + }); + } +} + +function addTypeOptions(node, typeName, options) { + const type = typeName.toLowerCase() + "s"; + const values = lookups[typeName][node.type]; + if (!values) return; + + const widgets = Object.keys(values); + const cls = type === "loras" ? LoraInfoDialog : CheckpointInfoDialog; + + const opts = []; + for (const w of widgets) { + addInfoOption(node, type, cls, w, opts); + } + + if (!opts.length) return; + + if (opts.length === 1) { + opts[0].content = `View ${typeName} info...`; + options.unshift(opts[0]); + } else { + options.unshift({ + title: `View ${typeName} info...`, + has_submenu: true, + submenu: { + options: opts, + }, + }); + } +} + +app.registerExtension({ + name: "pysssss.ModelInfo", + setup() { + const addSetting = (type, defaultValue) => { + app.ui.settings.addSetting({ + id: `pysssss.ModelInfo.${type}Nodes`, + name: `🐍 Model Info - ${type} Nodes/Widgets`, + type: "text", + defaultValue, + tooltip: `Comma separated list of NodeTypeName or NodeTypeName.WidgetName that contain ${type} node names that should have the View Info option available.\nIf no widget name is specified the first widget will be used. Regex matches (e.g. NodeName..*lora_\\d+) are supported in the widget name.`, + onChange(value) { + lookups[type] = value.split(",").reduce((p, n) => { + n = n.trim(); + const pos = n.indexOf("."); + const split = pos === -1 ? [n] : [n.substring(0, pos), n.substring(pos + 1)]; + p[split[0]] ??= {}; + p[split[0]][split[1] ?? ""] = true; + return p; + }, {}); + }, + }); + }; + addSetting( + "Lora", + ["LoraLoader.lora_name", "LoraLoader|pysssss", "LoraLoaderModelOnly.lora_name", "LoRA Stacker.lora_name.*"].join(",") + ); + addSetting( + "Checkpoint", + ["CheckpointLoader.ckpt_name", "CheckpointLoaderSimple", "CheckpointLoader|pysssss", "Efficient Loader", "Eff. Loader SDXL"].join(",") + ); + + app.ui.settings.addSetting({ + id: `pysssss.ModelInfo.NsfwLevel`, + name: `🐍 Model Info - Image Preview Max NSFW Level`, + type: "combo", + defaultValue: "PG13", + options: Object.keys(NsfwLevel), + tooltip: `Hides preview images that are tagged as a higher NSFW level`, + onChange(value) { + ModelInfoDialog.nsfwLevel = NsfwLevel[value] ??
NsfwLevel.PG; + }, + }); + }, + beforeRegisterNodeDef(nodeType) { + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + if (this.widgets) { + for (const type in lookups) { + addTypeOptions(this, type, options); + } + } + + return getExtraMenuOptions?.apply(this, arguments); + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js new file mode 100644 index 0000000000000000000000000000000000000000..002f897ec3cb18cff11feb9baffafcd29eb4b3bd --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js @@ -0,0 +1,82 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; + +// Adds a menu option to toggle follow the executing node +// Adds a menu option to go to the currently executing node +// Adds a menu option to go to a node by type + +app.registerExtension({ + name: "pysssss.NodeFinder", + setup() { + let followExecution = false; + + const centerNode = (id) => { + if (!followExecution || !id) return; + const node = app.graph.getNodeById(id); + if (!node) return; + app.canvas.centerOnNode(node); + }; + + api.addEventListener("executing", ({ detail }) => centerNode(detail)); + + // Add canvas menu options + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + options.push(null, { + content: followExecution ? "Stop following execution" : "Follow execution", + callback: () => { + if ((followExecution = !followExecution)) { + centerNode(app.runningNodeId); + } + }, + }); + if (app.runningNodeId) { + options.push({ + content: "Show executing node", + callback: () => { + const node = app.graph.getNodeById(app.runningNodeId); + if (!node) return; + app.canvas.centerOnNode(node); + }, + }); + } + + const nodes = app.graph._nodes; + const types = nodes.reduce((p, n) => { + if (n.type in p) { + p[n.type].push(n); + } else { + p[n.type] = [n]; + } + return p; + }, {}); + options.push({ + content: "Go to node", + has_submenu: true, + submenu: { + options: Object.keys(types) + .sort() + .map((t) => ({ + content: t, + has_submenu: true, + submenu: { + options: types[t] + .sort((a, b) => { + return a.pos[0] - b.pos[0]; + }) + .map((n) => ({ + content: `${n.getTitle()} - #${n.id} (${n.pos[0]}, ${n.pos[1]})`, + callback: () => { + app.canvas.centerOnNode(n); + }, + })), + }, + })), + }, + }); + + return options; + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js new file mode 100644 index 0000000000000000000000000000000000000000..80421e5c1f7d574deda60343dd515f305ec5e348 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js @@ -0,0 +1,36 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "pysssss.PlaySound", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "PlaySound|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = async function () { + onExecuted?.apply(this, arguments); + if (this.widgets[0].value === "on empty queue") { + if (app.ui.lastQueueSize !== 0) { + await new Promise((r) => setTimeout(r, 500)); + } + if (app.ui.lastQueueSize !== 0) { + return; + } + } + let file = 
this.widgets[2].value;
+        if (!file) {
+          file = "notify.mp3";
+        }
+        if (!file.startsWith("http")) {
+          if (!file.includes("/")) {
+            file = "assets/" + file;
+          }
+          file = new URL(file, import.meta.url);
+        }
+
+        const url = new URL(file);
+        const audio = new Audio(url);
+        audio.volume = this.widgets[1].value;
+        audio.play();
+      };
+    }
+  },
+});
diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js
new file mode 100644
index 0000000000000000000000000000000000000000..1ed0f721b75b7b850693e25ecac7f557b50fae0d
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js
@@ -0,0 +1,255 @@
+import { app } from "../../../scripts/app.js";
+
+// Allows you to manage preset tags, e.g. a common negative prompt
+// Also performs replacements on any text field, e.g. allowing you to use preset text in CLIP Text Encode fields
+
+let replaceRegex;
+const id = "pysssss.PresetText.Presets";
+const MISSING = Symbol();
+
+const getPresets = () => {
+  let items;
+  try {
+    items = JSON.parse(localStorage.getItem(id));
+  } catch (error) {}
+  if (!items || !items.length) {
+    items = [{ name: "default negative", value: "worst quality" }];
+  }
+  return items;
+};
+
+let presets = getPresets();
+
+app.registerExtension({
+  name: "pysssss.PresetText",
+  setup() {
+    app.ui.settings.addSetting({
+      id: "pysssss.PresetText.ReplacementRegex",
+      name: "🐍 Preset Text Replacement Regex",
+      type: "text",
+      defaultValue: "(?:^|[^\\w])(?<replace>@(?<id>[\\w-]+))",
+      tooltip:
+        "The regex should return two named capture groups: id (the name of the preset text to use), replace (the matched text to replace)",
+      attrs: {
+        style: {
+          fontFamily: "monospace",
+        },
+      },
+      onChange(value) {
+        if (!value) {
+          replaceRegex = null;
+          return;
+        }
+        try {
+          replaceRegex = new RegExp(value, "g");
+        } catch (error) {
+          alert("Error creating regex for preset text replacement, no replacements will be performed.");
+          replaceRegex = null;
+        }
+      },
+    });
+
+    const drawNodeWidgets = LGraphCanvas.prototype.drawNodeWidgets;
+    LGraphCanvas.prototype.drawNodeWidgets = function (node) {
+      const c = LiteGraph.WIDGET_BGCOLOR;
+      try {
+        if (node[MISSING]) {
+          LiteGraph.WIDGET_BGCOLOR = "red";
+        }
+        return drawNodeWidgets.apply(this, arguments);
+      } finally {
+        LiteGraph.WIDGET_BGCOLOR = c;
+      }
+    };
+  },
+  registerCustomNodes() {
+    class PresetTextNode {
+      constructor() {
+        this.isVirtualNode = true;
+        this.serialize_widgets = true;
+        this.addOutput("text", "STRING");
+
+        const widget = this.addWidget("combo", "value", presets[0].name, () => {}, {
+          values: presets.map((p) => p.name),
+        });
+        this.addWidget("button", "Manage", "Manage", () => {
+          const container = document.createElement("div");
+          Object.assign(container.style, {
+            display: "grid",
+            gridTemplateColumns: "1fr 1fr",
+            gap: "10px",
+          });
+
+          const addNew = document.createElement("button");
+          addNew.textContent = "Add New";
+          addNew.classList.add("pysssss-presettext-addnew");
+          Object.assign(addNew.style, {
+            fontSize: "13px",
+            gridColumn: "1 / 3",
+            color: "dodgerblue",
+            width: "auto",
+            textAlign: "center",
+          });
+          addNew.onclick = () => {
+            addRow({ name: "", value: "" });
+          };
+          container.append(addNew);
+
+          function addRow(p) {
+            const name = document.createElement("input");
+            const nameLbl = document.createElement("label");
+            name.value = p.name;
+            nameLbl.textContent = "Name:";
+            nameLbl.append(name);
+
+            const value = document.createElement("input");
+            const valueLbl = 
document.createElement("label"); + value.value = p.value; + valueLbl.textContent = "Value:"; + valueLbl.append(value); + + addNew.before(nameLbl, valueLbl); + } + for (const p of presets) { + addRow(p); + } + + const help = document.createElement("span"); + help.textContent = "To remove a preset set the name or value to blank"; + help.style.gridColumn = "1 / 3"; + container.append(help); + + dialog.show(""); + dialog.textElement.append(container); + }); + + const dialog = new app.ui.dialog.constructor(); + dialog.element.classList.add("comfy-settings"); + + const closeButton = dialog.element.querySelector("button"); + closeButton.textContent = "CANCEL"; + const saveButton = document.createElement("button"); + saveButton.textContent = "SAVE"; + saveButton.onclick = function () { + const inputs = dialog.element.querySelectorAll("input"); + const p = []; + for (let i = 0; i < inputs.length; i += 2) { + const n = inputs[i]; + const v = inputs[i + 1]; + if (!n.value.trim() || !v.value.trim()) { + continue; + } + p.push({ name: n.value, value: v.value }); + } + + widget.options.values = p.map((p) => p.name); + if (!widget.options.values.includes(widget.value)) { + widget.value = widget.options.values[0]; + } + + presets = p; + localStorage.setItem(id, JSON.stringify(presets)); + + dialog.close(); + }; + + closeButton.before(saveButton); + + this.applyToGraph = function (workflow) { + // For each output link copy our value over the original widget value + if (this.outputs[0].links && this.outputs[0].links.length) { + for (const l of this.outputs[0].links) { + const link_info = app.graph.links[l]; + const outNode = app.graph.getNodeById(link_info.target_id); + const outIn = outNode && outNode.inputs && outNode.inputs[link_info.target_slot]; + if (outIn.widget) { + const w = outNode.widgets.find((w) => w.name === outIn.widget.name); + if (!w) continue; + const preset = presets.find((p) => p.name === widget.value); + if (!preset) { + this[MISSING] = true; + app.graph.setDirtyCanvas(true, true); + const msg = `Preset text '${widget.value}' not found. Please fix this and queue again.`; + throw new Error(msg); + } + delete this[MISSING]; + w.value = preset.value; + } + } + } + }; + } + } + + LiteGraph.registerNodeType( + "PresetText|pysssss", + Object.assign(PresetTextNode, { + title: "Preset Text 🐍", + }) + ); + + PresetTextNode.category = "utils"; + }, + nodeCreated(node) { + if (node.widgets) { + // Locate dynamic prompt text widgets + const widgets = node.widgets.filter((n) => n.type === "customtext" || n.type === "text"); + for (const widget of widgets) { + const callbacks = [ + () => { + let prompt = widget.value; + if (replaceRegex && typeof prompt.replace !== 'undefined') { + prompt = prompt.replace(replaceRegex, (match, p1, p2, index, text, groups) => { + if (!groups.replace || !groups.id) return match; // No match, bad regex? 
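+								// Note: preset names are matched with any whitespace collapsed to "-", so a preset named "default negative" is referenced in text as @default-negative.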
+ + const preset = presets.find((p) => p.name.replaceAll(/\s/g, "-") === groups.id); + if (!preset) return match; // Invalid name + + const pos = match.indexOf(groups.replace); + return match.substring(0, pos) + preset.value; + }); + } + return prompt; + }, + ]; + let inheritedSerializeValue = widget.serializeValue || null; + + let called = false; + const serializeValue = async (workflowNode, widgetIndex) => { + const origWidgetValue = widget.value; + if (called) return origWidgetValue; + called = true; + + let allCallbacks = [...callbacks]; + if (inheritedSerializeValue) { + allCallbacks.push(inheritedSerializeValue) + } + let valueIsUndefined = false; + + for (const cb of allCallbacks) { + let value = await cb(workflowNode, widgetIndex); + // Need to check the callback return value before it is set on widget.value as it coerces it to a string (even for undefined) + if (value === undefined) valueIsUndefined = true; + widget.value = value; + } + + const prompt = valueIsUndefined ? undefined : widget.value; + widget.value = origWidgetValue; + + called = false; + + return prompt; + }; + + Object.defineProperty(widget, "serializeValue", { + get() { + return serializeValue; + }, + set(cb) { + inheritedSerializeValue = cb; + }, + }); + } + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js new file mode 100644 index 0000000000000000000000000000000000000000..6baf1d3521a76ce4b0b9628c5be79e859bbacfe4 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js @@ -0,0 +1,196 @@ +import { app } from "../../../scripts/app.js"; + +// Adds a bunch of context menu entries for quickly adding common steps + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +function getOrAddVAELoader(node) { + let vaeNode = app.graph._nodes.find((n) => n.type === "VAELoader"); + if (!vaeNode) { + vaeNode = addNode("VAELoader", node); + } + return vaeNode; +} + +function addNode(name, nextTo, options) { + options = { select: true, shiftY: 0, before: false, ...(options || {}) }; + const node = LiteGraph.createNode(name); + app.graph.add(node); + node.pos = [ + options.before ? 
nextTo.pos[0] - node.size[0] - 30 : nextTo.pos[0] + nextTo.size[0] + 30, + nextTo.pos[1] + options.shiftY, + ]; + if (options.select) { + app.canvas.selectNode(node, false); + } + return node; +} + +app.registerExtension({ + name: "pysssss.QuickNodes", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.input && nodeData.input.required) { + const keys = Object.keys(nodeData.input.required); + for (let i = 0; i < keys.length; i++) { + if (nodeData.input.required[keys[i]][0] === "VAE") { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Use VAE", + callback: () => { + getOrAddVAELoader(this).connect(0, this, i); + }, + }); + }); + break; + } + } + } + + if (nodeData.name === "KSampler") { + addMenuHandler(nodeType, function (_, options) { + options.unshift( + { + content: "Add Blank Input", + callback: () => { + const imageNode = addNode("EmptyLatentImage", this, { before: true }); + imageNode.connect(0, this, 3); + }, + }, + { + content: "Add Hi-res Fix", + callback: () => { + const upscaleNode = addNode("LatentUpscale", this); + this.connect(0, upscaleNode, 0); + + const sampleNode = addNode("KSampler", upscaleNode); + + for (let i = 0; i < 3; i++) { + const l = this.getInputLink(i); + if (l) { + app.graph.getNodeById(l.origin_id).connect(l.origin_slot, sampleNode, i); + } + } + + upscaleNode.connect(0, sampleNode, 3); + }, + }, + { + content: "Add 2nd Pass", + callback: () => { + const upscaleNode = addNode("LatentUpscale", this); + this.connect(0, upscaleNode, 0); + + const ckptNode = addNode("CheckpointLoaderSimple", this); + const sampleNode = addNode("KSampler", ckptNode); + + const positiveLink = this.getInputLink(1); + const negativeLink = this.getInputLink(2); + const positiveNode = positiveLink + ? app.graph.add(app.graph.getNodeById(positiveLink.origin_id).clone()) + : addNode("CLIPTextEncode"); + const negativeNode = negativeLink + ? app.graph.add(app.graph.getNodeById(negativeLink.origin_id).clone()) + : addNode("CLIPTextEncode"); + + ckptNode.connect(0, sampleNode, 0); + ckptNode.connect(1, positiveNode, 0); + ckptNode.connect(1, negativeNode, 0); + positiveNode.connect(0, sampleNode, 1); + negativeNode.connect(0, sampleNode, 2); + upscaleNode.connect(0, sampleNode, 3); + }, + }, + { + content: "Add Save Image", + callback: () => { + const decodeNode = addNode("VAEDecode", this); + this.connect(0, decodeNode, 0); + + getOrAddVAELoader(decodeNode).connect(0, decodeNode, 1); + + const saveNode = addNode("SaveImage", decodeNode); + decodeNode.connect(0, saveNode, 0); + }, + } + ); + }); + } + + if (nodeData.name === "CheckpointLoaderSimple") { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Add Clip Skip", + callback: () => { + const clipSkipNode = addNode("CLIPSetLastLayer", this); + const clipLinks = this.outputs[1].links ? this.outputs[1].links.map((l) => ({ ...graph.links[l] })) : []; + + this.disconnectOutput(1); + this.connect(1, clipSkipNode, 0); + + for (const clipLink of clipLinks) { + clipSkipNode.connect(0, clipLink.target_id, clipLink.target_slot); + } + }, + }); + }); + } + + if ( + nodeData.name === "CheckpointLoaderSimple" || + nodeData.name === "CheckpointLoader" || + nodeData.name === "CheckpointLoader|pysssss" || + nodeData.name === "LoraLoader" || + nodeData.name === "LoraLoader|pysssss" + ) { + addMenuHandler(nodeType, function (_, options) { + function addLora(type) { + const loraNode = addNode(type, this); + + const modelLinks = this.outputs[0].links ? 
this.outputs[0].links.map((l) => ({ ...graph.links[l] })) : []; + const clipLinks = this.outputs[1].links ? this.outputs[1].links.map((l) => ({ ...graph.links[l] })) : []; + + this.disconnectOutput(0); + this.disconnectOutput(1); + + this.connect(0, loraNode, 0); + this.connect(1, loraNode, 1); + + for (const modelLink of modelLinks) { + loraNode.connect(0, modelLink.target_id, modelLink.target_slot); + } + + for (const clipLink of clipLinks) { + loraNode.connect(1, clipLink.target_id, clipLink.target_slot); + } + } + options.unshift( + { + content: "Add LoRA", + callback: () => addLora.call(this, "LoraLoader"), + }, + { + content: "Add 🐍 LoRA", + callback: () => addLora.call(this, "LoraLoader|pysssss"), + }, + { + content: "Add Prompts", + callback: () => { + const positiveNode = addNode("CLIPTextEncode", this); + const negativeNode = addNode("CLIPTextEncode", this, { shiftY: positiveNode.size[1] + 30 }); + + this.connect(1, positiveNode, 0); + this.connect(1, negativeNode, 0); + }, + } + ); + }); + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js new file mode 100644 index 0000000000000000000000000000000000000000..df5e0e86644fef57f8c56fffa921d94a4f4ae429 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js @@ -0,0 +1,123 @@ +import { app } from "../../../scripts/app.js"; + +const REPEATER = "Repeater|pysssss"; + +app.registerExtension({ + name: "pysssss.Repeater", + init() { + const graphToPrompt = app.graphToPrompt; + app.graphToPrompt = async function () { + const res = await graphToPrompt.apply(this, arguments); + + const id = Date.now() + "_"; + let u = 0; + + let newNodes = {}; + const newRepeaters = {}; + for (const nodeId in res.output) { + let output = res.output[nodeId]; + if (output.class_type === REPEATER) { + const isMulti = output.inputs.output === "multi"; + if (output.inputs.node_mode === "create") { + // We need to clone the input for every repeat + const orig = res.output[output.inputs.source[0]]; + if (isMulti) { + if (!newRepeaters[nodeId]) { + newRepeaters[nodeId] = []; + newRepeaters[nodeId][output.inputs.repeats - 1] = nodeId; + } + } + for (let i = 0; i < output.inputs.repeats - 1; i++) { + const clonedInputId = id + ++u; + + if (isMulti) { + // If multi create we need to clone the repeater too + newNodes[clonedInputId] = structuredClone(orig); + + output = structuredClone(output); + + const clonedRepeaterId = id + ++u; + newNodes[clonedRepeaterId] = output; + output.inputs["source"][0] = clonedInputId; + + newRepeaters[nodeId][i] = clonedRepeaterId; + } else { + newNodes[clonedInputId] = orig; + } + output.inputs[clonedInputId] = [clonedInputId, output.inputs.source[1]]; + } + } else if (isMulti) { + newRepeaters[nodeId] = Array(output.inputs.repeats).fill(nodeId); + } + } + } + + Object.assign(res.output, newNodes); + newNodes = {}; + + for (const nodeId in res.output) { + const output = res.output[nodeId]; + for (const k in output.inputs) { + const v = output.inputs[k]; + if (v instanceof Array) { + const repeaterId = v[0]; + const source = newRepeaters[repeaterId]; + if (source) { + v[0] = source.pop(); + v[1] = 0; + } + } + } + } + + // Object.assign(res.output, newNodes); + + return res; + }; + }, + beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === REPEATER) { + const SETUP_OUTPUTS = Symbol(); + nodeType.prototype[SETUP_OUTPUTS] = function (repeats) { + if (repeats == null) { + repeats = 
this.widgets[0].value; + } + while (this.outputs.length > repeats) { + this.removeOutput(repeats); + } + const id = Date.now() + "_"; + let u = 0; + while (this.outputs.length < repeats) { + this.addOutput(id + ++u, "*", { label: "*" }); + } + }; + + const onAdded = nodeType.prototype.onAdded; + nodeType.prototype.onAdded = function () { + const self = this; + const repeatsCb = this.widgets[0].callback; + this.widgets[0].callback = async function () { + const v = (await repeatsCb?.apply(this, arguments)) ?? this.value; + if (self.widgets[1].value === "multi") { + self[SETUP_OUTPUTS](v); + } + return v; + }; + + const outputCb = this.widgets[1].callback; + this.widgets[1].callback = async function () { + const v = (await outputCb?.apply(this, arguments)) ?? this.value; + if (v === "single") { + self.outputs[0].shape = 6; + self[SETUP_OUTPUTS](1); + } else { + delete self.outputs[0].shape; + self[SETUP_OUTPUTS](); + } + return v; + }; + return onAdded?.apply(this, arguments); + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js new file mode 100644 index 0000000000000000000000000000000000000000..2f9faae1c602777d848a7f845f5d3915747da689 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js @@ -0,0 +1,348 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +const REROUTE_PRIMITIVE = "ReroutePrimitive|pysssss"; +const MULTI_PRIMITIVE = "MultiPrimitive|pysssss"; +const LAST_TYPE = Symbol("LastType"); + +app.registerExtension({ + name: "pysssss.ReroutePrimitive", + init() { + // On graph configure, fire onGraphConfigured to create widgets + const graphConfigure = LGraph.prototype.configure; + LGraph.prototype.configure = function () { + const r = graphConfigure.apply(this, arguments); + for (const n of app.graph._nodes) { + if (n.type === REROUTE_PRIMITIVE) { + n.onGraphConfigured(); + } + } + + return r; + }; + + // Hide this node as it is no longer supported + const getNodeTypesCategories = LiteGraph.getNodeTypesCategories; + LiteGraph.getNodeTypesCategories = function() { + return getNodeTypesCategories.apply(this, arguments).filter(c => !c.startsWith("__hidden__")); + } + + const graphToPrompt = app.graphToPrompt; + app.graphToPrompt = async function () { + const res = await graphToPrompt.apply(this, arguments); + + const multiOutputs = []; + for (const nodeId in res.output) { + const output = res.output[nodeId]; + if (output.class_type === MULTI_PRIMITIVE) { + multiOutputs.push({ id: nodeId, inputs: output.inputs }); + } + } + + function permute(outputs) { + function generatePermutations(inputs, currentIndex, currentPermutation, result) { + if (currentIndex === inputs.length) { + result.push({ ...currentPermutation }); + return; + } + + const input = inputs[currentIndex]; + + for (const k in input) { + currentPermutation[currentIndex] = input[k]; + generatePermutations(inputs, currentIndex + 1, currentPermutation, result); + } + } + + const inputs = outputs.map((output) => output.inputs); + const result = []; + const current = new Array(inputs.length); + + generatePermutations(inputs, 0, current, result); + + return outputs.map((output, index) => ({ + ...output, + inputs: result.reduce((p, permutation) => { + const count = Object.keys(p).length; + p["value" + (count || "")] = permutation[index]; + return p; + }, {}), + })); + } + + const permutations = 
permute(multiOutputs); + for (let i = 0; i < permutations.length; i++) { + res.output[multiOutputs[i].id].inputs = permutations[i].inputs; + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + function addOutputHandler() { + // Finds the first non reroute output node down the chain + nodeType.prototype.getFirstReroutedOutput = function (slot) { + if (nodeData.name === MULTI_PRIMITIVE) { + slot = 0; + } + const links = this.outputs[slot].links; + if (!links) return null; + + const search = []; + for (const l of links) { + const link = app.graph.links[l]; + if (!link) continue; + + const node = app.graph.getNodeById(link.target_id); + if (node.type !== REROUTE_PRIMITIVE && node.type !== MULTI_PRIMITIVE) { + return { node, link }; + } + search.push({ node, link }); + } + + for (const { link, node } of search) { + const r = node.getFirstReroutedOutput(link.target_slot); + if (r) { + return r; + } + } + }; + } + + if (nodeData.name === REROUTE_PRIMITIVE) { + const configure = nodeType.prototype.configure || LGraphNode.prototype.configure; + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + const onAdded = nodeType.prototype.onAdded; + + nodeType.title_mode = LiteGraph.NO_TITLE; + + function hasAnyInput(node) { + for (const input of node.inputs) { + if (input.link) { + return true; + } + } + return false; + } + + // Remove input text + nodeType.prototype.onAdded = function () { + onAdded?.apply(this, arguments); + this.inputs[0].label = ""; + this.outputs[0].label = "value"; + this.setSize(this.computeSize()); + }; + + // Restore any widgets + nodeType.prototype.onGraphConfigured = function () { + if (hasAnyInput(this)) return; + + const outputNode = this.getFirstReroutedOutput(0); + if (outputNode) { + this.checkPrimitiveWidget(outputNode); + } + }; + + // Check if we need to create (or remove) a widget on the node + nodeType.prototype.checkPrimitiveWidget = function ({ node, link }) { + let widgetType = link.type; + let targetLabel = widgetType; + const input = node.inputs[link.target_slot]; + if (input.widget?.config?.[0] instanceof Array) { + targetLabel = input.widget.name; + widgetType = "COMBO"; + } + + if (widgetType in ComfyWidgets) { + if (!this.widgets?.length) { + let v; + if (this.widgets_values?.length) { + v = this.widgets_values[0]; + } + let config = [link.type, {}]; + if (input.widget?.config) { + config = input.widget.config; + } + const { widget } = ComfyWidgets[widgetType](this, "value", config, app); + if (v !== undefined && (!this[LAST_TYPE] || this[LAST_TYPE] === widgetType)) { + widget.value = v; + } + this[LAST_TYPE] = widgetType; + } + } else if (this.widgets) { + this.widgets.length = 0; + } + + return targetLabel; + }; + + // Finds all input nodes from the current reroute + nodeType.prototype.getReroutedInputs = function (slot) { + let nodes = [{ node: this }]; + let node = this; + while (node?.type === REROUTE_PRIMITIVE) { + const input = node.inputs[slot]; + if (input.link) { + const link = app.graph.links[input.link]; + node = app.graph.getNodeById(link.origin_id); + slot = link.origin_slot; + nodes.push({ + node, + link, + }); + } else { + node = null; + } + } + + return nodes; + }; + + addOutputHandler(); + + // Update the type of all reroutes in a chain + nodeType.prototype.changeRerouteType = function (slot, type, label) { + const color = LGraphCanvas.link_type_colors[type]; + const output = this.outputs[slot]; + this.inputs[slot].label = " "; + output.label = label || (type === "*" ? 
"value" : type); + output.type = type; + + // Process all linked outputs + for (const linkId of output.links || []) { + const link = app.graph.links[linkId]; + if (!link) continue; + link.color = color; + const node = app.graph.getNodeById(link.target_id); + if (node.changeRerouteType) { + // Recursively update reroutes + node.changeRerouteType(link.target_slot, type, label); + } else { + // Validate links to 'real' nodes + const theirType = node.inputs[link.target_slot].type; + if (theirType !== type && theirType !== "*") { + node.disconnectInput(link.target_slot); + } + } + } + + if (this.inputs[slot].link) { + const link = app.graph.links[this.inputs[slot].link]; + if (link) link.color = color; + } + }; + + // Override configure so we can flag that we are configuring to avoid link validation breaking + let configuring = false; + nodeType.prototype.configure = function () { + configuring = true; + const r = configure?.apply(this, arguments); + configuring = false; + + return r; + }; + + Object.defineProperty(nodeType, "title_mode", { + get() { + return app.canvas.current_node?.widgets?.length ? LiteGraph.NORMAL_TITLE : LiteGraph.NO_TITLE; + }, + }); + + nodeType.prototype.onConnectionsChange = function (type, _, connected, link_info) { + // If configuring treat everything as OK as links may not be set by litegraph yet + if (configuring) return; + + const isInput = type === LiteGraph.INPUT; + const slot = isInput ? link_info.target_slot : link_info.origin_slot; + + let targetLabel = null; + let targetNode = null; + let targetType = "*"; + let targetSlot = slot; + + const inputPath = this.getReroutedInputs(slot); + const rootInput = inputPath[inputPath.length - 1]; + const outputNode = this.getFirstReroutedOutput(slot); + if (rootInput.node.type === REROUTE_PRIMITIVE) { + // Our input node is a reroute, so see if we have an output + if (outputNode) { + targetType = outputNode.link.type; + } else if (rootInput.node.widgets) { + rootInput.node.widgets.length = 0; + } + targetNode = rootInput; + targetSlot = rootInput.link?.target_slot ?? 
slot; + } else { + // We have a real input, so we want to use that type + targetNode = inputPath[inputPath.length - 2]; + targetType = rootInput.node.outputs[rootInput.link.origin_slot].type; + targetSlot = rootInput.link.target_slot; + } + + if (this.widgets && inputPath.length > 1) { + // We have an input node so remove our widget + this.widgets.length = 0; + } + + if (outputNode && rootInput.node.checkPrimitiveWidget) { + // We have an output, check if we need to create a widget + targetLabel = rootInput.node.checkPrimitiveWidget(outputNode); + } + + // Trigger an update of the type to all child nodes + targetNode.node.changeRerouteType(targetSlot, targetType, targetLabel); + + return onConnectionsChange?.apply(this, arguments); + }; + + // When collapsed fix the size to just the dot + const computeSize = nodeType.prototype.computeSize || LGraphNode.prototype.computeSize; + nodeType.prototype.computeSize = function () { + const r = computeSize.apply(this, arguments); + if (this.flags?.collapsed) { + return [1, 25]; + } else if (this.widgets?.length) { + return r; + } else { + let w = 75; + if (this.outputs?.[0]?.label) { + const t = LiteGraph.NODE_TEXT_SIZE * this.outputs[0].label.length * 0.6 + 30; + if (t > w) { + w = t; + } + } + return [w, r[1]]; + } + }; + + // On collapse shrink the node to just a dot + const collapse = nodeType.prototype.collapse || LGraphNode.prototype.collapse; + nodeType.prototype.collapse = function () { + collapse.apply(this, arguments); + this.setSize(this.computeSize()); + requestAnimationFrame(() => { + this.setDirtyCanvas(true, true); + }); + }; + + // Shift the bounding area up slightly as LiteGraph miscalculates it for collapsed nodes + nodeType.prototype.onBounding = function (area) { + if (this.flags?.collapsed) { + area[1] -= 15; + } + }; + } else if (nodeData.name === MULTI_PRIMITIVE) { + addOutputHandler(); + nodeType.prototype.onConnectionsChange = function (type, _, connected, link_info) { + for (let i = 0; i < this.inputs.length - 1; i++) { + if (!this.inputs[i].link) { + this.removeInput(i--); + } + } + if (this.inputs[this.inputs.length - 1].link) { + this.addInput("v" + +new Date(), this.inputs[0].type).label = "value"; + } + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js new file mode 100644 index 0000000000000000000000000000000000000000..dabfa741dd7199be8711e902689912596215f3c1 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js @@ -0,0 +1,81 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; +import { $el } from "../../../scripts/ui.js"; + +const id = "pysssss.ShowImageOnMenu"; +const ext = { + name: id, + async setup(app) { + let enabled = true; + let nodeId = null; + const img = $el("img", { + style: { + width: "100%", + height: "150px", + objectFit: "contain", + }, + }); + const link = $el( + "a", + { + style: { + width: "100%", + height: "150px", + marginTop: "10px", + order: 100, // Place this item last (until someone else has a higher order) + display: "none", + }, + href: "#", + onclick: (e) => { + e.stopPropagation(); + e.preventDefault(); + const node = app.graph.getNodeById(nodeId); + if (!node) return; + app.canvas.centerOnNode(node); + app.canvas.setZoom(1); + }, + }, + [img] + ); + + app.ui.menuContainer.append(link); + + const show = (src, node) => { + img.src = src; + nodeId = Number(node); + link.style.display = 
"unset"; + }; + + api.addEventListener("executed", ({ detail }) => { + if (!enabled) return; + const images = detail?.output?.images; + if (!images || !images.length) return; + const format = app.getPreviewFormatParam(); + const src = [ + `./view?filename=${encodeURIComponent(images[0].filename)}`, + `type=${images[0].type}`, + `subfolder=${encodeURIComponent(images[0].subfolder)}`, + `t=${+new Date()}${format}`,].join('&'); + show(src, detail.node); + }); + + api.addEventListener("b_preview", ({ detail }) => { + if (!enabled) return; + show(URL.createObjectURL(detail), app.runningNodeId); + }); + + app.ui.settings.addSetting({ + id, + name: "🐍 Show Image On Menu", + defaultValue: true, + type: "boolean", + onChange(value) { + enabled = value; + + if (!enabled) link.style.display = "none"; + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js new file mode 100644 index 0000000000000000000000000000000000000000..8d808de3d54330f2b3b2167bc43653cb5010d282 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js @@ -0,0 +1,57 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +// Displays input text on a node +app.registerExtension({ + name: "pysssss.ShowText", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "ShowText|pysssss") { + function populate(text) { + if (this.widgets) { + for (let i = 1; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = 1; + } + + const v = [...text]; + if (!v[0]) { + v.shift(); + } + for (const list of v) { + const w = ComfyWidgets["STRING"](this, "text2", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = list; + } + + requestAnimationFrame(() => { + const sz = this.computeSize(); + if (sz[0] < this.size[0]) { + sz[0] = this.size[0]; + } + if (sz[1] < this.size[1]) { + sz[1] = this.size[1]; + } + this.onResize?.(sz); + app.graph.setDirtyCanvas(true, false); + }); + } + + // When the node is executed we will be sent the input text, display this in the widget + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + populate.call(this, message.text); + }; + + const onConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function () { + onConfigure?.apply(this, arguments); + if (this.widgets_values?.length) { + populate.call(this, this.widgets_values.slice(+this.widgets_values.length > 1)); + } + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js new file mode 100644 index 0000000000000000000000000000000000000000..8804381e287503c17fb935fb7ef6e4e2aa588bdc --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js @@ -0,0 +1,221 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; + +let setting, guide_setting, guide_config; +const id = "pysssss.SnapToGrid"; +const guide_id = id + ".Guide"; +const guide_config_default = { + lines: { + enabled: false, + fillStyle: "rgba(255, 0, 0, 0.5)", + }, + block: { + enabled: false, + fillStyle: "rgba(0, 0, 255, 0.5)", + }, +} + +/** Wraps the provided function call to set/reset 
shiftDown when setting is enabled. */ +function wrapCallInSettingCheck(fn) { + if (setting?.value) { + const shift = app.shiftDown; + app.shiftDown = true; + const r = fn(); + app.shiftDown = shift; + return r; + } + return fn(); +} + +const ext = { + name: id, + init() { + if (localStorage.getItem(guide_id) === null) { + localStorage.setItem(guide_id, JSON.stringify(guide_config_default)); + } + guide_config = JSON.parse(localStorage.getItem(guide_id)); + + setting = app.ui.settings.addSetting({ + id, + name: "🐍 Always snap to grid", + defaultValue: false, + type: "boolean", + onChange(value) { + app.canvas.align_to_grid = value; + }, + }); + + guide_setting = app.ui.settings.addSetting({ + id: id + ".Guide", + name: "🐍 Display drag-and-drop guides", + type: (name, setter, value) => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: name, + }), + ]), + $el("td", [ + $el( + "label", + { + textContent: "Lines: ", + style: { + display: "inline-block", + }, + }, + [ + $el("input", { + id: id.replaceAll(".", "-") + "-line-text", + type: "text", + value: guide_config.lines.fillStyle, + onchange: (event) => { + guide_config.lines.fillStyle = event.target.value; + localStorage.setItem(guide_id, JSON.stringify(guide_config)); + } + }), + $el("input", { + id: id.replaceAll(".", "-") + "-line-checkbox", + type: "checkbox", + checked: guide_config.lines.enabled, + onchange: (event) => { + guide_config.lines.enabled = !!event.target.checked; + localStorage.setItem(guide_id, JSON.stringify(guide_config)); + }, + }), + ] + ), + $el( + "label", + { + textContent: "Block: ", + style: { + display: "inline-block", + }, + }, + [ + $el("input", { + id: id.replaceAll(".", "-") + "-block-text", + type: "text", + value: guide_config.block.fillStyle, + onchange: (event) => { + guide_config.block.fillStyle = event.target.value; + localStorage.setItem(guide_id, JSON.stringify(guide_config)); + } + }), + $el("input", { + id: id.replaceAll(".", "-") + '-block-checkbox', + type: "checkbox", + checked: guide_config.block.enabled, + onchange: (event) => { + guide_config.block.enabled = !!event.target.checked; + localStorage.setItem(guide_id, JSON.stringify(guide_config)); + }, + }), + ] + ), + ]), + ]); + } + }); + + // We need to register our hooks after the core snap to grid extension runs + // Do this from the graph configure function so we still get onNodeAdded calls + const configure = LGraph.prototype.configure; + LGraph.prototype.configure = function () { + // Override drawNode to draw the drop position + const drawNode = LGraphCanvas.prototype.drawNode; + LGraphCanvas.prototype.drawNode = function () { + wrapCallInSettingCheck(() => drawNode.apply(this, arguments)); + }; + + // Override node added to add a resize handler to force grid alignment + const onNodeAdded = app.graph.onNodeAdded; + app.graph.onNodeAdded = function (node) { + const r = onNodeAdded?.apply(this, arguments); + const onResize = node.onResize; + node.onResize = function () { + wrapCallInSettingCheck(() => onResize?.apply(this, arguments)); + }; + return r; + }; + + + const groupMove = LGraphGroup.prototype.move; + LGraphGroup.prototype.move = function(deltax, deltay, ignore_nodes) { + wrapCallInSettingCheck(() => groupMove.apply(this, arguments)); + } + + const canvasDrawGroups = LGraphCanvas.prototype.drawGroups; + LGraphCanvas.prototype.drawGroups = function (canvas, ctx) { + wrapCallInSettingCheck(() => canvasDrawGroups.apply(this, arguments)); + } + + const canvasOnGroupAdd = 
LGraphCanvas.onGroupAdd; + LGraphCanvas.onGroupAdd = function() { + wrapCallInSettingCheck(() => canvasOnGroupAdd.apply(this, arguments)); + } + + return configure.apply(this, arguments); + }; + + // Override drag-and-drop behavior to show orthogonal guide lines around selected node(s) and preview of where the node(s) will be placed + const origDrawNode = LGraphCanvas.prototype.drawNode + LGraphCanvas.prototype.drawNode = function (node, ctx) { + const enabled = guide_config.lines.enabled || guide_config.block.enabled; + if (enabled && app.shiftDown && this.node_dragged && node.id in this.selected_nodes) { + // discretize the canvas into grid + let x = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.pos[0] / LiteGraph.CANVAS_GRID_SIZE); + let y = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.pos[1] / LiteGraph.CANVAS_GRID_SIZE); + + // calculate the width and height of the node + // (also need to shift the y position of the node, depending on whether the title is visible) + x -= node.pos[0]; + y -= node.pos[1]; + let w, h; + if (node.flags.collapsed) { + w = node._collapsed_width; + h = LiteGraph.NODE_TITLE_HEIGHT; + y -= LiteGraph.NODE_TITLE_HEIGHT; + } else { + w = node.size[0]; + h = node.size[1]; + let titleMode = node.constructor.title_mode; + if (titleMode !== LiteGraph.TRANSPARENT_TITLE && titleMode !== LiteGraph.NO_TITLE) { + h += LiteGraph.NODE_TITLE_HEIGHT; + y -= LiteGraph.NODE_TITLE_HEIGHT; + } + } + + // save the original fill style + const f = ctx.fillStyle; + + // draw preview for drag-and-drop (rectangle to show where the node will be placed) + if (guide_config.block.enabled) { + ctx.fillStyle = guide_config.block.fillStyle; + ctx.fillRect(x, y, w, h); + } + + // add guide lines around node (arbitrarily long enough to span most workflows) + if (guide_config.lines.enabled) { + const xd = 10000; + const yd = 10000; + const thickness = 3; + ctx.fillStyle = guide_config.lines.fillStyle; + ctx.fillRect(x - xd, y, 2*xd, thickness); + ctx.fillRect(x, y - yd, thickness, 2*yd); + ctx.fillRect(x - xd, y + h, 2*xd, thickness); + ctx.fillRect(x + w, y - yd, thickness, 2*yd); + } + + // restore the original fill style + ctx.fillStyle = f; + } + + return origDrawNode.apply(this, arguments); + }; + }, +}; + +app.registerExtension(ext); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js new file mode 100644 index 0000000000000000000000000000000000000000..1a71c255c52a48097e76626dd0e272ac6df65fb6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js @@ -0,0 +1,33 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +// Displays input text on a node + +app.registerExtension({ + name: "pysssss.StringFunction", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "StringFunction|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + + if (this.widgets) { + const pos = this.widgets.findIndex((w) => w.name === "result"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = pos; + } + } + + const w = ComfyWidgets["STRING"](this, "result", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = message.text; + + 
this.onResize?.(this.size); + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js new file mode 100644 index 0000000000000000000000000000000000000000..000253a45cfb8e1cde37ed81bd03aa1e8b5adfbd --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js @@ -0,0 +1,30 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.SwapResolution", + async beforeRegisterNodeDef(nodeType, nodeData) { + const inputs = { ...nodeData.input?.required, ...nodeData.input?.optional }; + if (inputs.width && inputs.height) { + const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = origGetExtraMenuOptions?.apply?.(this, arguments); + + options.push( + { + content: "Swap width/height", + callback: () => { + const w = this.widgets.find((w) => w.name === "width"); + const h = this.widgets.find((w) => w.name === "height"); + const a = w.value; + w.value = h.value; + h.value = a; + app.graph.setDirtyCanvas(true); + }, + }, + null + ); + + return r; + }; + } + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/systemNotification.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/systemNotification.js new file mode 100644 index 0000000000000000000000000000000000000000..4fbd518102aa188dcdefbe2e279a4053f0d7fea7 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/systemNotification.js @@ -0,0 +1,49 @@ +import { app } from "../../../scripts/app.js"; + +const notificationSetup = () => { + if (!("Notification" in window)) { + console.log("This browser does not support notifications."); + alert("This browser does not support notifications."); + return; + } + if (Notification.permission === "denied") { + console.log("Notifications are blocked. Please enable them in your browser settings."); + alert("Notifications are blocked. Please enable them in your browser settings."); + return; + } + if (Notification.permission !== "granted") { + Notification.requestPermission(); + } + return true; +}; + +app.registerExtension({ + name: "pysssss.SystemNotification", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "SystemNotification|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = async function () { + onExecuted?.apply(this, arguments); + const mode = this.widgets.find((w) => w.name === "mode"); + const message = this.widgets.find((w) => w.name === "message"); + + if (mode.value === "on empty queue") { + if (app.ui.lastQueueSize !== 0) { + await new Promise((r) => setTimeout(r, 500)); + } + if (app.ui.lastQueueSize !== 0) { + return; + } + } + if (!notificationSetup()) return; + const notification = new Notification("ComfyUI", { body: message.value ?? "Your notification has triggered." 
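+				// Note: the Notification constructor only shows a popup when permission was granted (see notificationSetup above); modern browsers generally also require a secure context (HTTPS or localhost).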
});
+      };
+
+      const onNodeCreated = nodeType.prototype.onNodeCreated;
+      nodeType.prototype.onNodeCreated = function () {
+        onNodeCreated?.apply(this, arguments);
+        notificationSetup();
+      };
+    }
+  },
+});
diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js
new file mode 100644
index 0000000000000000000000000000000000000000..f3e5ce39a9be64f9944f1ddb48c7321e7906280b
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js
@@ -0,0 +1,36 @@
+import { app } from "../../../scripts/app.js";
+
+const id = "pysssss.UseNumberInputPrompt";
+const ext = {
+  name: id,
+  async setup(app) {
+    const prompt = LGraphCanvas.prototype.prompt;
+
+    const setting = app.ui.settings.addSetting({
+      id,
+      name: "🐍 Use number input on value entry",
+      defaultValue: false,
+      type: "boolean",
+    });
+
+    LGraphCanvas.prototype.prompt = function () {
+      const dialog = prompt.apply(this, arguments);
+      if (setting.value && typeof arguments[1] === "number") {
+        // If this should be a number then update the input
+        const input = dialog.querySelector("input");
+        input.type = "number";
+
+        // Add constraints
+        const widget = app.canvas.node_widget?.[1];
+        if (widget?.options) {
+          for (const prop of ["min", "max", "step"]) {
+            if (widget.options[prop]) input[prop] = widget.options[prop];
+          }
+        }
+      }
+      return dialog;
+    };
+  },
+};
+
+app.registerExtension(ext);
diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js
new file mode 100644
index 0000000000000000000000000000000000000000..b10b85900d366dfebb6d53941fd2a5607e59f855
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js
@@ -0,0 +1,285 @@
+import { app } from "../../../scripts/app.js";
+import { $el, ComfyDialog } from "../../../scripts/ui.js";
+
+// Allows you to specify custom default values for any widget on any node
+
+const id = "pysssss.WidgetDefaults";
+const nodeDataKey = Symbol();
+
+app.registerExtension({
+  name: id,
+  beforeRegisterNodeDef(nodeType, nodeData) {
+    nodeType[nodeDataKey] = nodeData;
+  },
+  setup() {
+    let defaults;
+    let regexDefaults;
+    let setting;
+
+    const getNodeDefaults = (node, defaults) => {
+      const nodeDefaults = defaults[node.type] ?? {};
+      const propSetBy = {};
+
+      Object.keys(regexDefaults)
+        .filter((r) => new RegExp(r).test(node.type))
+        .reduce((p, n) => {
+          const props = regexDefaults[n];
+          for (const k in props) {
+            // Use the longest matching key as it's probably the most specific
+            if (!(k in nodeDefaults) || (k in propSetBy && n.length > propSetBy[k].length)) {
+              propSetBy[k] = n;
+              nodeDefaults[k] = props[k];
+            }
+          }
+          return p;
+        }, nodeDefaults);
+
+      return nodeDefaults;
+    };
+
+    const applyDefaults = (defaults) => {
+      for (const node of Object.values(LiteGraph.registered_node_types)) {
+        const nodeData = node[nodeDataKey];
+        if (!nodeData) continue;
+        const nodeDefaults = getNodeDefaults(node, defaults);
+        if (!nodeDefaults) continue;
+        const inputs = { ...(nodeData.input?.required || {}), ...(nodeData.input?.optional || {}) };
+
+        for (const w in nodeDefaults) {
+          const widgetDef = inputs[w];
+          if (widgetDef) {
+            let v = nodeDefaults[w];
+            if (widgetDef[0] === "INT" || widgetDef[0] === "FLOAT") {
+              v = +v;
+            }
+            if (widgetDef[1]) {
+              widgetDef[1].default = v;
+            } else {
+              widgetDef[1] = { default: v };
+            }
+          }
+        }
+      }
+    };
+
+    const getDefaults = () => {
+      let items;
+      regexDefaults = {};
+      try {
+        items = JSON.parse(setting.value);
+        items = items.reduce((p, n) => {
+          if (n.node.startsWith("/") && n.node.endsWith("/")) {
+            const name = n.node.substring(1, n.node.length - 1);
+            try {
+              // Validate regex
+              new RegExp(name);
+
+              if (!regexDefaults[name]) regexDefaults[name] = {};
+              regexDefaults[name][n.widget] = n.value;
+            } catch (error) {}
+          }
+
+          if (!p[n.node]) p[n.node] = {};
+          p[n.node][n.widget] = n.value;
+          return p;
+        }, {});
+      } catch (error) {}
+      if (!items) {
+        items = {};
+      }
+      applyDefaults(items);
+      return items;
+    };
+
+    const onNodeAdded = app.graph.onNodeAdded;
+    app.graph.onNodeAdded = function (node) {
+      onNodeAdded?.apply?.(this, arguments);
+
+      // See if we have any defaults for this type of node
+      const nodeDefaults = getNodeDefaults(node.constructor, defaults);
+      if (!nodeDefaults) return;
+
+      // Don't run if they are pre-configured nodes from load/pastes
+      const stack = new Error().stack;
+      if (stack.includes("pasteFromClipboard") || stack.includes("loadGraphData")) {
+        return;
+      }
+
+      for (const k in nodeDefaults) {
+        if (k.startsWith("property.")) {
+          const name = k.substring(9);
+          let v = nodeDefaults[k];
+          // Special handling for some built-in values
+          if (name in node || ["color", "bgcolor", "title"].includes(name)) {
+            node[name] = v;
+          } else {
+            // Try using the correct type
+            if (!node.properties) node.properties = {};
+            if (typeof node.properties[name] === "number") v = +v;
+            else if (typeof node.properties[name] === "boolean") v = v === "true";
+            else if (v === "true") v = true;
+
+            node.properties[name] = v;
+          }
+        }
+      }
+    };
+
+    class WidgetDefaultsDialog extends ComfyDialog {
+      constructor() {
+        super();
+        this.element.classList.add("comfy-manage-templates");
+        this.grid = $el(
+          "div",
+          {
+            style: {
+              display: "grid",
+              gridTemplateColumns: "1fr auto auto auto",
+              gap: "5px",
+            },
+            className: "pysssss-widget-defaults",
+          },
+          [
+            $el("label", {
+              textContent: "Node Class",
+            }),
+            $el("label", {
+              textContent: "Widget Name",
+            }),
+            $el("label", {
+              textContent: "Default Value",
+            }),
+            $el("label"),
+            (this.rows = $el("div", {
+              style: {
+                display: "contents",
+              },
+            })),
+          ]
+        );
+      }
+
+      createButtons() {
+        const btns = super.createButtons();
+        btns[0].textContent = "Cancel";
+        btns.unshift(
+          $el("button", {
+            type: "button",
+            textContent: "Add New",
+            onclick: () 
=> this.addRow(), + }), + $el("button", { + type: "button", + textContent: "Save", + onclick: () => this.save(), + }) + ); + return btns; + } + + addRow(node = "", widget = "", value = "") { + let nameInput; + this.rows.append( + $el( + "div", + { + style: { + display: "contents", + }, + className: "pysssss-widget-defaults-row", + }, + [ + $el("input", { + placeholder: "e.g. CheckpointLoaderSimple", + value: node, + }), + $el("input", { + placeholder: "e.g. ckpt_name", + value: widget, + $: (el) => (nameInput = el), + }), + $el("input", { + placeholder: "e.g. myBestModel.safetensors", + value, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", + fontWeight: "normal", + }, + onclick: (e) => { + nameInput.value = ""; + e.target.parentElement.style.display = "none"; + }, + }), + ] + ) + ); + } + + save() { + const rows = this.rows.children; + const items = []; + + for (const row of rows) { + const inputs = row.querySelectorAll("input"); + const node = inputs[0].value.trim(); + const widget = inputs[1].value.trim(); + const value = inputs[2].value; + if (node && widget) { + items.push({ node, widget, value }); + } + } + + setting.value = JSON.stringify(items); + defaults = getDefaults(); + + this.close(); + } + + show() { + this.rows.replaceChildren(); + for (const nodeName in defaults) { + const node = defaults[nodeName]; + for (const widgetName in node) { + this.addRow(nodeName, widgetName, node[widgetName]); + } + } + + this.addRow(); + super.show(this.grid); + } + } + + setting = app.ui.settings.addSetting({ + id, + name: "🐍 Widget Defaults", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: "🐍 Widget & Property Defaults:", + }), + ]), + $el("td", [ + $el("button", { + textContent: "Manage", + onclick: () => { + app.ui.settings.element.close(); + const dialog = new WidgetDefaultsDialog(); + dialog.show(); + }, + style: { + fontSize: "14px", + }, + }), + ]), + ]); + }, + }); + defaults = getDefaults(); + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js new file mode 100644 index 0000000000000000000000000000000000000000..62475a4bca32eb4370c5676a9d5c9e84fecab4bd --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js @@ -0,0 +1,635 @@ +import { app } from "../../../scripts/app.js"; +import { importA1111 } from "../../../scripts/pnginfo.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +let getDrawTextConfig = null; +let fileInput; + +class WorkflowImage { + static accept = ""; + + getBounds() { + // Calculate the min max bounds for the nodes on the graph + const bounds = app.graph._nodes.reduce( + (p, n) => { + if (n.pos[0] < p[0]) p[0] = n.pos[0]; + if (n.pos[1] < p[1]) p[1] = n.pos[1]; + const bounds = n.getBounding(); + const r = n.pos[0] + bounds[2]; + const b = n.pos[1] + bounds[3]; + if (r > p[2]) p[2] = r; + if (b > p[3]) p[3] = b; + return p; + }, + [99999, 99999, -99999, -99999] + ); + + bounds[0] -= 100; + bounds[1] -= 100; + bounds[2] += 100; + bounds[3] += 100; + return bounds; + } + + saveState() { + this.state = { + scale: app.canvas.ds.scale, + width: app.canvas.canvas.width, + height: app.canvas.canvas.height, + offset: app.canvas.ds.offset, + transform: app.canvas.canvas.getContext('2d').getTransform(), // Save the original transformation matrix + }; + } + + restoreState() { + app.canvas.ds.scale = 
this.state.scale; + app.canvas.canvas.width = this.state.width; + app.canvas.canvas.height = this.state.height; + app.canvas.ds.offset = this.state.offset; + app.canvas.canvas.getContext('2d').setTransform(this.state.transform); // Reapply the original transformation matrix + } + + updateView(bounds) { + app.canvas.ds.scale = 1; + app.canvas.canvas.width = bounds[2] - bounds[0]; + app.canvas.canvas.height = bounds[3] - bounds[1]; + app.canvas.ds.offset = [-bounds[0], -bounds[1]]; + } + + getDrawTextConfig(_, widget) { + return { + x: 10, + y: widget.last_y + 10, + resetTransform: false, + }; + } + + async export(includeWorkflow) { + // Save the current state of the canvas + this.saveState(); + // Update to render the whole workflow + this.updateView(this.getBounds()); + + // Flag that we are saving and render the canvas + getDrawTextConfig = this.getDrawTextConfig; + app.canvas.draw(true, true); + getDrawTextConfig = null; + + // Generate a blob of the image containing the workflow + const blob = await this.getBlob(includeWorkflow ? JSON.stringify(app.graph.serialize()) : undefined); + + // Restore initial state and redraw + this.restoreState(); + app.canvas.draw(true, true); + + // Download the generated image + this.download(blob); + } + + download(blob) { + const url = URL.createObjectURL(blob); + const a = document.createElement("a"); + Object.assign(a, { + href: url, + download: "workflow." + this.extension, + style: "display: none", + }); + document.body.append(a); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + + static import() { + if (!fileInput) { + fileInput = document.createElement("input"); + Object.assign(fileInput, { + type: "file", + style: "display: none", + onchange: () => { + app.handleFile(fileInput.files[0]); + }, + }); + document.body.append(fileInput); + } + fileInput.accept = WorkflowImage.accept; + fileInput.click(); + } +} + +class PngWorkflowImage extends WorkflowImage { + static accept = ".png,image/png"; + extension = "png"; + + n2b(n) { + return new Uint8Array([(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff]); + } + + joinArrayBuffer(...bufs) { + const result = new Uint8Array(bufs.reduce((totalSize, buf) => totalSize + buf.byteLength, 0)); + bufs.reduce((offset, buf) => { + result.set(buf, offset); + return offset + buf.byteLength; + }, 0); + return result; + } + + crc32(data) { + const crcTable = + PngWorkflowImage.crcTable || + (PngWorkflowImage.crcTable = (() => { + let c; + const crcTable = []; + for (let n = 0; n < 256; n++) { + c = n; + for (let k = 0; k < 8; k++) { + c = c & 1 ? 
0xedb88320 ^ (c >>> 1) : c >>> 1; + } + crcTable[n] = c; + } + return crcTable; + })()); + let crc = 0 ^ -1; + for (let i = 0; i < data.byteLength; i++) { + crc = (crc >>> 8) ^ crcTable[(crc ^ data[i]) & 0xff]; + } + return (crc ^ -1) >>> 0; + } + + async getBlob(workflow) { + return new Promise((r) => { + app.canvasEl.toBlob(async (blob) => { + if (workflow) { + // If we have a workflow embed it in the PNG + const buffer = await blob.arrayBuffer(); + const typedArr = new Uint8Array(buffer); + const view = new DataView(buffer); + + const data = new TextEncoder().encode(`tEXtworkflow\0${workflow}`); + const chunk = this.joinArrayBuffer(this.n2b(data.byteLength - 4), data, this.n2b(this.crc32(data))); + + const sz = view.getUint32(8) + 20; + const result = this.joinArrayBuffer(typedArr.subarray(0, sz), chunk, typedArr.subarray(sz)); + + blob = new Blob([result], { type: "image/png" }); + } + + r(blob); + }); + }); + } +} + +class DataReader { + /** @type {DataView} */ + view; + /** @type {boolean | undefined} */ + littleEndian; + offset = 0; + + /** + * @param {DataView} view + */ + constructor(view) { + this.view = view; + } + + /** + * Reads N bytes and increments the offset + * @param {1 | 2 | 4 | 8} size + */ + read(size, signed = false, littleEndian = undefined) { + const v = this.peek(size, signed, littleEndian); + this.offset += size; + return v; + } + + /** + * Reads N bytes + * @param {1 | 2 | 4 | 8} size + */ + peek(size, signed = false, littleEndian = undefined) { + let m = ""; + if (size === 8) m += "Big"; + m += signed ? "Int" : "Uint"; + m += size * 8; + m = "get" + m; + if (!this.view[m]) { + throw new Error("Method not found: " + m); + } + + return this.view[m](this.offset, littleEndian == null ? this.littleEndian : littleEndian); + } + + /** + * Seeks to the specified position or by the number of bytes specified relative to the current offset + * @param {number} pos + * @param {boolean} relative + */ + seek(pos, relative = true) { + if (relative) { + this.offset += pos; + } else { + this.offset = pos; + } + } +} + +class Tiff { + /** @type {DataReader} */ + #reader; + #start; + + readExif(reader) { + const TIFF_MARKER = 0x2a; + const EXIF_IFD = 0x8769; + + this.#reader = reader; + this.#start = this.#reader.offset; + this.#readEndianness(); + + if (this.#reader.read(2) !== TIFF_MARKER) { + throw new Error("Invalid TIFF: Marker not found."); + } + + const dirOffset = this.#reader.read(4); + this.#reader.seek(this.#start + dirOffset, false); + + for (const t of this.#readTags()) { + if (t.id === EXIF_IFD) { + return this.#readExifTag(t); + } + } + throw new Error("No EXIF: TIFF Exif IFD tag not found"); + } + + #readUserComment(tag) { + this.#reader.seek(this.#start + tag.offset, false); + const encoding = this.#reader.read(8); + if (encoding !== 0x45444f43494e55n) { + throw new Error("Unable to read non-Unicode data"); + } + const decoder = new TextDecoder("utf-16be"); + return decoder.decode(new DataView(this.#reader.view.buffer, this.#reader.offset, tag.count - 8)); + } + + #readExifTag(exifTag) { + const EXIF_USER_COMMENT = 0x9286; + + this.#reader.seek(this.#start + exifTag.offset, false); + for (const t of this.#readTags()) { + if (t.id === EXIF_USER_COMMENT) { + return this.#readUserComment(t); + } + } + throw new Error("No embedded data: UserComment Exif tag not found"); + } + + *#readTags() {
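+ // Each IFD entry is 12 bytes: tag id (2), type (2), count (4), value/offset (4)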
+ const count = this.#reader.read(2); + for (let i = 0; i < count; i++) { + yield { + id: this.#reader.read(2), + type: this.#reader.read(2), + count: this.#reader.read(4), + offset: this.#reader.read(4), + }; + } + } + + #readEndianness() { + const II = 0x4949; + const MM = 0x4d4d; + const endianness = this.#reader.read(2); + if (endianness === II) { + this.#reader.littleEndian = true; + } else if (endianness === MM) { + this.#reader.littleEndian = false; + } else { + throw new Error("Invalid JPEG: Endianness marker not found."); + } + } +} + +class Jpeg { + /** @type {DataReader} */ + #reader; + + /** + * @param {ArrayBuffer} buffer + */ + readExif(buffer) { + const JPEG_MARKER = 0xffd8; + const EXIF_SIG = 0x45786966; + + this.#reader = new DataReader(new DataView(buffer)); + if (this.#reader.read(2) !== JPEG_MARKER) { + throw new Error("Invalid JPEG: SOI not found."); + } + + const app0 = this.#readAppMarkerId(); + if (app0 !== 0) { + throw new Error(`Invalid JPEG: APP0 not found [found: ${app0}].`); + } + + this.#consumeAppSegment(); + const app1 = this.#readAppMarkerId(); + if (app1 !== 1) { + throw new Error(`No EXIF: APP1 not found [found: ${app1}].`); + } + + // Skip size + this.#reader.seek(2); + + if (this.#reader.read(4) !== EXIF_SIG) { + throw new Error(`No EXIF: Invalid EXIF header signature.`); + } + if (this.#reader.read(2) !== 0) { + throw new Error(`No EXIF: Invalid EXIF header.`); + } + + return new Tiff().readExif(this.#reader); + } + + #readAppMarkerId() { + const APP0_MARKER = 0xffe0; + return this.#reader.read(2) - APP0_MARKER; + } + + #consumeAppSegment() { + this.#reader.seek(this.#reader.read(2) - 2); + } +} + +class SvgWorkflowImage extends WorkflowImage { + static accept = ".svg,image/svg+xml"; + extension = "svg"; + + static init() { + // Override file handling to allow drag & drop of SVG + const handleFile = app.handleFile; + app.handleFile = async function (file) { + if (file && (file.type === "image/svg+xml" || file.name?.endsWith(".svg"))) { + const reader = new FileReader(); + reader.onload = () => { + // Extract embedded workflow from <desc> tags + const descEnd = reader.result.lastIndexOf("</desc>"); + if (descEnd !== -1) { + const descStart = reader.result.lastIndexOf("<desc>", descEnd); + if (descStart !== -1) { + const json = reader.result.substring(descStart + 6, descEnd); + this.loadGraphData(JSON.parse(SvgWorkflowImage.unescapeXml(json))); + } + } + }; + reader.readAsText(file); + return; + } else if (file && (file.type === "image/jpeg" || file.name?.endsWith(".jpg") || file.name?.endsWith(".jpeg"))) { + if ( + await new Promise((resolve) => { + try { + // This shouldn't go in here but it's easier than refactoring handleFile + const reader = new FileReader(); + reader.onload = async () => { + try { + const value = new Jpeg().readExif(reader.result); + importA1111(app.graph, value); + resolve(true); + } catch (error) { + resolve(false); + } + }; + reader.onerror = () => resolve(false); + reader.readAsArrayBuffer(file); + } catch (error) { + resolve(false); + } + }) + ) { + return; + } + } + return handleFile.apply(this, arguments); + }; + } + + static escapeXml(unsafe) { + return unsafe.replaceAll("&", "&amp;").replaceAll("<", "&lt;").replaceAll(">", "&gt;"); + } + + static unescapeXml(safe) { + return safe.replaceAll("&lt;", "<").replaceAll("&gt;", ">").replaceAll("&amp;", "&"); + } + + getDrawTextConfig(_, widget) { + return { + x: parseInt(widget.inputEl.style.left), + y: parseInt(widget.inputEl.style.top), + resetTransform: true, + }; + } + + saveState() { + super.saveState(); + this.state.ctx = app.canvas.ctx; + } + + restoreState() { + super.restoreState(); + app.canvas.ctx = this.state.ctx; + } + + updateView(bounds) {
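+ // Resize the canvas to the workflow bounds, then swap the 2D context for an SVG-serializing one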
+ super.updateView(bounds); + this.createSvgCtx(bounds); + } + + createSvgCtx(bounds) { + const ctx = this.state.ctx; + const svgCtx = (this.svgCtx = new C2S(bounds[2] - bounds[0], bounds[3] - bounds[1])); + svgCtx.canvas.getBoundingClientRect = function () { + return { width: svgCtx.width, height: svgCtx.height }; + }; + + // Override the c2s handling of images to draw images as canvases + const drawImage = svgCtx.drawImage; + svgCtx.drawImage = function (...args) { + const image = args[0]; + // If we are an image node and not a datauri then we need to replace with a canvas + // we can't convert to data uri here as it is an async process + if (image.nodeName === "IMG" && !image.src.startsWith("data:image/")) { + const canvas = document.createElement("canvas"); + canvas.width = image.width; + canvas.height = image.height; + const imgCtx = canvas.getContext("2d"); + imgCtx.drawImage(image, 0, 0); + args[0] = canvas; + } + + return drawImage.apply(this, args); + }; + + // Implement missing required functions + svgCtx.getTransform = function () { + return ctx.getTransform(); + }; + svgCtx.resetTransform = function () { + return ctx.resetTransform(); + }; + svgCtx.roundRect = svgCtx.rect; + app.canvas.ctx = svgCtx; + } + + getBlob(workflow) { + let svg = this.svgCtx.getSerializedSvg(true); + + if (workflow) { + // Store the workflow JSON in a <desc> element so import can find it + svg = svg.replace("</svg>", `<desc>${SvgWorkflowImage.escapeXml(workflow)}</desc></svg>`); + } + + return new Blob([svg], { type: "image/svg+xml" }); + } +} + +app.registerExtension({ + name: "pysssss.WorkflowImage", + init() { + // https://codepen.io/peterhry/pen/nbMaYg + function wrapText(context, text, x, y, maxWidth, lineHeight) { + var words = text.split(" "), + line = "", + i, + test, + metrics; + + for (i = 0; i < words.length; i++) { + test = words[i]; + metrics = context.measureText(test); + while (metrics.width > maxWidth) { + // Determine how much of the word will fit + test = test.substring(0, test.length - 1); + metrics = context.measureText(test); + } + if (words[i] != test) { + words.splice(i + 1, 0, words[i].substr(test.length)); + words[i] = test; + } + + test = line + words[i] + " "; + metrics = context.measureText(test); + + if (metrics.width > maxWidth && i > 0) { + context.fillText(line, x, y); + line = words[i] + " "; + y += lineHeight; + } else { + line = test; + } + } + + context.fillText(line, x, y); + } + + const stringWidget = ComfyWidgets.STRING; + // Override multiline string widgets to draw text using canvas while saving as svg + ComfyWidgets.STRING = function () { + const w = stringWidget.apply(this, arguments); + if (w.widget && w.widget.type === "customtext") { + const draw = w.widget.draw; + w.widget.draw = function (ctx) { + draw.apply(this, arguments); + if (this.inputEl.hidden) return; + + if (getDrawTextConfig) { + const config = getDrawTextConfig(ctx, this); + const t = ctx.getTransform(); + ctx.save(); + if (config.resetTransform) { + ctx.resetTransform(); + } + + const style = document.defaultView.getComputedStyle(this.inputEl, null); + const x = config.x; + const y = config.y; + const w = parseInt(this.inputEl.style.width); + const h = parseInt(this.inputEl.style.height); + ctx.fillStyle = style.getPropertyValue("background-color"); + ctx.fillRect(x, y, w, h); + + ctx.fillStyle = style.getPropertyValue("color"); + ctx.font = style.getPropertyValue("font"); + + const line = t.d * 12; + const split = this.inputEl.value.split("\n"); + let start = y; + for (const l of split) { + start += line; + wrapText(ctx, l, x + 4, start, w, line); + } + + ctx.restore(); + } + }; + } + return w; + }; + }, + setup()
{ + const script = document.createElement("script"); + script.onload = function () { + const formats = [SvgWorkflowImage, PngWorkflowImage]; + for (const f of formats) { + f.init?.call(); + WorkflowImage.accept += (WorkflowImage.accept ? "," : "") + f.accept; + } + + // Add canvas menu options + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + + options.push(null, { + content: "Workflow Image", + submenu: { + options: [ + { + content: "Import", + callback: () => { + WorkflowImage.import(); + }, + }, + { + content: "Export", + submenu: { + options: formats.flatMap((f) => [ + { + content: f.name.replace("WorkflowImage", "").toLocaleLowerCase(), + callback: () => { + new f().export(true); + }, + }, + { + content: f.name.replace("WorkflowImage", "").toLocaleLowerCase() + " (no embedded workflow)", + callback: () => { + new f().export(); + }, + }, + ]), + }, + }, + ], + }, + }); + return options; + }; + }; + + script.src = new URL(`assets/canvas2svg.js`, import.meta.url); + document.body.append(script); + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js new file mode 100644 index 0000000000000000000000000000000000000000..b0ff744e8f92d6cfa87faf56aaf113f1bbd9c8eb --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js @@ -0,0 +1,342 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; +import { $el } from "../../../scripts/ui.js"; + +// Adds workflow management +// Original implementation by https://github.com/i-h4x +// Thanks for permission to reimplement as an extension + +const style = ` +#comfy-save-button, #comfy-load-button { + position: relative; + overflow: hidden; +} +.pysssss-workflow-arrow { + position: absolute; + top: 0; + bottom: 0; + right: 0; + font-size: 12px; + display: flex; + align-items: center; + width: 24px; + justify-content: center; + background: rgba(255,255,255,0.1); +} +.pysssss-workflow-arrow:after { + content: "▼"; +} +.pysssss-workflow-arrow:hover { + filter: brightness(1.6); + background-color: var(--comfy-menu-bg); +} +.pysssss-workflow-load .litemenu-entry:not(.has_submenu):before, +.pysssss-workflow-load ~ .litecontextmenu .litemenu-entry:not(.has_submenu):before { + content: "🎛️"; + padding-right: 5px; +} +.pysssss-workflow-load .litemenu-entry.has_submenu:before, +.pysssss-workflow-load ~ .litecontextmenu .litemenu-entry.has_submenu:before { + content: "📂"; + padding-right: 5px; + position: relative; + top: -1px; +} +.pysssss-workflow-popup ~ .litecontextmenu { + transform: scale(1.3); +} +`; + +async function getWorkflows() { + const response = await api.fetchApi("/pysssss/workflows", { cache: "no-store" }); + return await response.json(); +} + +async function getWorkflow(name) { + const response = await api.fetchApi(`/pysssss/workflows/${encodeURIComponent(name)}`, { cache: "no-store" }); + return await response.json(); +} + +async function saveWorkflow(name, workflow, overwrite) { + try { + const response = await api.fetchApi("/pysssss/workflows", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ name, workflow, overwrite }), + }); + if (response.status === 201) { + return true; + } + if (response.status === 409) { + return false; + } + throw new Error(response.statusText); + } catch (error) { + console.error(error); 
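+ // On failure this falls through and returns undefined; the caller treats that like a 409 name conflict and prompts to overwrite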
+ } +} + +class PysssssWorkflows { + async load() { + this.workflows = await getWorkflows(); + if(this.workflows.length) { + this.workflows.sort(); + } + this.loadMenu.style.display = this.workflows.length ? "flex" : "none"; + } + + getMenuOptions(callback) { + const menu = []; + const directories = new Map(); + for (const workflow of this.workflows || []) { + const path = workflow.split("/"); + let parent = menu; + let currentPath = ""; + for (let i = 0; i < path.length - 1; i++) { + currentPath += "/" + path[i]; + let newParent = directories.get(currentPath); + if (!newParent) { + newParent = { + title: path[i], + has_submenu: true, + submenu: { + options: [], + }, + }; + parent.push(newParent); + newParent = newParent.submenu.options; + directories.set(currentPath, newParent); + } + parent = newParent; + } + parent.push({ + title: path[path.length - 1], + callback: () => callback(workflow), + }); + } + return menu; + } + + constructor() { + function addWorkflowMenu(type, getOptions) { + return $el("div.pysssss-workflow-arrow", { + parent: document.getElementById(`comfy-${type}-button`), + onclick: (e) => { + e.preventDefault(); + e.stopPropagation(); + + LiteGraph.closeAllContextMenus(); + const menu = new LiteGraph.ContextMenu( + getOptions(), + { + event: e, + scale: 1.3, + }, + window + ); + menu.root.classList.add("pysssss-workflow-popup"); + menu.root.classList.add(`pysssss-workflow-${type}`); + }, + }); + } + + this.loadMenu = addWorkflowMenu("load", () => + this.getMenuOptions(async (workflow) => { + const json = await getWorkflow(workflow); + app.loadGraphData(json); + }) + ); + addWorkflowMenu("save", () => { + return [ + { + title: "Save as", + callback: () => { + let filename = prompt("Enter filename", this.workflowName || "workflow"); + if (filename) { + if (!filename.toLowerCase().endsWith(".json")) { + filename += ".json"; + } + + this.workflowName = filename; + + const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string + const blob = new Blob([json], { type: "application/json" }); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: filename, + style: { display: "none" }, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + }, + }, + { + title: "Save to workflows", + callback: async () => { + const name = prompt("Enter filename", this.workflowName || "workflow"); + if (name) { + this.workflowName = name; + + const data = app.graph.serialize(); + if (!(await saveWorkflow(name, data))) { + if (confirm("A workspace with this name already exists, do you want to overwrite it?")) { + await saveWorkflow(name, app.graph.serialize(), true); + } else { + return; + } + } + await this.load(); + } + }, + }, + ]; + }); + this.load(); + + const handleFile = app.handleFile; + const self = this; + app.handleFile = function (file) { + if (file?.name?.endsWith(".json")) { + self.workflowName = file.name; + } else { + self.workflowName = null; + } + return handleFile.apply(this, arguments); + }; + } +} + +const refreshComboInNodes = app.refreshComboInNodes; +let workflows; + +async function sendToWorkflow(img, workflow) { + const graph = !workflow ? 
app.graph.serialize() : await getWorkflow(workflow); + const nodes = graph.nodes.filter((n) => n.type === "LoadImage"); + let targetNode; + if (nodes.length === 0) { + alert("To send the image to another workflow, that workflow must have a LoadImage node."); + return; + } else if (nodes.length > 1) { + targetNode = nodes.find((n) => n.title?.toLowerCase().includes("input")); + if (!targetNode) { + targetNode = nodes[0]; + alert( + "The target workflow has multiple LoadImage nodes, include 'input' in the name of the one you want to use. The first one will be used here." + ); + } + } else { + targetNode = nodes[0]; + } + + const blob = await (await fetch(img.src)).blob(); + const name = + (workflow || "sendtoworkflow").replace(/\//g, "_") + + "-" + + +new Date() + + new URLSearchParams(img.src.split("?")[1]).get("filename"); + const body = new FormData(); + body.append("image", new File([blob], name)); + + const resp = await api.fetchApi("/upload/image", { + method: "POST", + body, + }); + + if (resp.status === 200) { + await refreshComboInNodes.call(app); + targetNode.widgets_values[0] = name; + app.loadGraphData(graph); + app.graph.getNodeById(targetNode.id); + } else { + alert(resp.status + " - " + resp.statusText); + } +} + +app.registerExtension({ + name: "pysssss.Workflows", + init() { + $el("style", { + textContent: style, + parent: document.head, + }); + }, + async setup() { + workflows = new PysssssWorkflows(); + app.refreshComboInNodes = function () { + workflows.load(); + refreshComboInNodes.apply(this, arguments); + }; + + const comfyDefault = "[ComfyUI Default]"; + const defaultWorkflow = app.ui.settings.addSetting({ + id: "pysssss.Workflows.Default", + name: "🐍 Default Workflow", + defaultValue: comfyDefault, + type: "combo", + options: (value) => + [comfyDefault, ...workflows.workflows].map((m) => ({ + value: m, + text: m, + selected: m === value, + })), + }); + + document.getElementById("comfy-load-default-button").onclick = async function () { + if ( + localStorage["Comfy.Settings.Comfy.ConfirmClear"] === "false" || + confirm(`Load default workflow (${defaultWorkflow.value})?`) + ) { + if (defaultWorkflow.value === comfyDefault) { + app.loadGraphData(); + } else { + const json = await getWorkflow(defaultWorkflow.value); + app.loadGraphData(json); + } + } + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = getExtraMenuOptions?.apply?.(this, arguments); + let img; + if (this.imageIndex != null) { + // An image is selected so select that + img = this.imgs[this.imageIndex]; + } else if (this.overIndex != null) { + // No image is selected but one is hovered + img = this.imgs[this.overIndex]; + } + + if (img) { + let pos = options.findIndex((o) => o.content === "Save Image"); + if (pos === -1) { + pos = 0; + } else { + pos++; + } + + options.splice(pos, 0, { + content: "Send to workflow", + has_submenu: true, + submenu: { + options: [ + { callback: () => sendToWorkflow(img), title: "[Current workflow]" }, + ...workflows.getMenuOptions(sendToWorkflow.bind(null, img)), + ], + }, + }); + } + + return r; + }; + }, +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..3877ae0a7ff6f94ac222fd704e112723db776114 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt @@ 
-0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. 
+ + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. 
+ + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box".
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/README.md b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d08ea26137a173b822f0e1c7a7e74c77c0963a20 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/README.md @@ -0,0 +1,490 @@ +[![Youtube Badge](https://img.shields.io/badge/Youtube-FF0000?style=for-the-badge&logo=Youtube&logoColor=white&link=https://www.youtube.com/watch?v=AccoxDZIg3Y&list=PL_Ej2RDzjQLGfEeizq4GISeY3FtVyFmGP)](https://www.youtube.com/watch?v=AccoxDZIg3Y&list=PL_Ej2RDzjQLGfEeizq4GISeY3FtVyFmGP) + +# ComfyUI-Impact-Pack + +**Custom nodes pack for ComfyUI** +This custom node pack helps to conveniently enhance images through Detector, Detailer, Upscaler, Pipe, and more. + + +## NOTICE +* V6.0: Supports the FLUX.1 model in Impact KSampler, Detailers, and PreviewBridgeLatent +* V5.0: No longer compatible with versions of ComfyUI before 2024.04.08. +* V4.87.4: Update to a version of ComfyUI after 2024.04.08 for proper functionality. +* V4.85: Incompatible with the outdated **ComfyUI IPAdapter Plus**. (A version dated March 24th or later is required.) +* V4.77: Compatibility patch applied. Requires the ComfyUI version from Oct. 8th or later. +* V4.73.3: ControlNetApply (SEGS) supports AnimateDiff +* V4.20.1: Due to the feature update in `RegionalSampler`, the parameter order has changed, causing malfunctions in previously created `RegionalSamplers`. Please adjust the parameters accordingly. +* V4.12: `MASKS` was changed to `MASK`. +* V4.7.2 isn't compatible with old versions of `ControlNet Auxiliary Preprocessor`. If you use `MediaPipe FaceMesh to SEGS`, update to the latest version (Sep. 17th). +* Selection weight syntax changed (`:` -> `::`) in V3.16. ([tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md)) +* Starting from V3.6, the latest version (Aug 8, 9ccc965) of ComfyUI is required. +* **In versions below V3.3.1, there was an issue with the image quality generated after using the UltralyticsDetectorProvider. Please make sure to upgrade to a newer version.** +* Starting from V3.0, nodes related to `mmdet` are optional nodes that are activated only based on the configuration settings. + - Through ComfyUI-Impact-Subpack, you can utilize UltralyticsDetectorProvider to access various detection models. +* Between versions 2.21 and 2.22, there is a partial compatibility break regarding the Detailer workflow. If you continue to use an existing workflow, errors may occur during execution. An additional output called "enhanced_alpha_list" has been added to Detailer-related nodes. +* The permission error related to cv2 that occurred during the installation of Impact Pack has been patched in version 2.21.4. However, please note that the latest versions of ComfyUI and ComfyUI-Manager are required.
+* The "PreviewBridge" feature may not function correctly on ComfyUI versions released before July 1, 2023.
+* Attempting to load the "ComfyUI-Impact-Pack" on ComfyUI versions released before June 27, 2023, will result in a failure.
+* With the addition of wildcard support in FaceDetailer, the structure of DETAILER_PIPE-related nodes and Detailer nodes has changed. There may be malfunctions when using existing workflows.
+
+
+## Custom Nodes
+### [Detector nodes](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/detectors.md)
+ * `SAMLoader` - Loads the SAM model.
+ * `UltralyticsDetectorProvider` - Loads an Ultralytics model to provide SEGM_DETECTOR and BBOX_DETECTOR.
+   - Unlike `MMDetDetectorProvider`, for segm models, `BBOX_DETECTOR` is also provided.
+   - The various models available in UltralyticsDetectorProvider can be downloaded through **ComfyUI-Manager**.
+ * `ONNXDetectorProvider` - Loads an ONNX model to provide BBOX_DETECTOR.
+ * `CLIPSegDetectorProvider` - Wrapper for CLIPSeg to provide BBOX_DETECTOR.
+   * You need to install the ComfyUI-CLIPSeg node extension.
+ * `SEGM Detector (combined)` - Detects segmentation and returns a mask from the input image.
+ * `BBOX Detector (combined)` - Detects bounding boxes and returns a mask from the input image.
+ * `SAMDetector (combined)` - Utilizes the SAM technology to extract the segment at the location indicated by the input SEGS on the input image and outputs it as a unified mask.
+ * `SAMDetector (segmented)` - Similar to `SAMDetector (combined)`, but it separates and outputs the detected segments. Multiple segments can be found for the same detected area, and currently, a policy is in place to group them arbitrarily in sets of three. This aspect is expected to be improved in the future.
+   * As a result, it outputs the `combined_mask`, which is a unified mask, and `batch_masks`, which are multiple masks grouped together in batch form.
+   * While `batch_masks` may not be completely separated, it provides functionality to perform some level of segmentation.
+ * `Simple Detector (SEGS)` - Operating primarily with `BBOX_DETECTOR`, and with the additional provision of `SAM_MODEL` or `SEGM_DETECTOR`, this node internally generates improved SEGS through mask operations on both *bbox* and *silhouette*. It serves as a convenient tool to simplify a somewhat intricate workflow.
+
+### ControlNet, IPAdapter
+ * `ControlNetApply (SEGS)` - To apply ControlNet in SEGS, you need to use the Preprocessor Provider node from the Inspire Pack.
+   * `segs_preprocessor` and `control_image` can be selectively applied. If a `control_image` is given, `segs_preprocessor` will be ignored.
+   * If `control_image` is set, you can preview the cropped cnet image through `SEGSPreview (CNET Image)`. Images generated by `segs_preprocessor` should be verified through the `cnet_images` output of each Detailer.
+   * The `segs_preprocessor` operates by applying preprocessing on-the-fly based on the cropped image during the detailing process, while `control_image` will be cropped and used as input to `ControlNetApply (SEGS)`.
+ * `ControlNetClear (SEGS)` - Clears the applied ControlNet from SEGS.
+ * `IPAdapterApply (SEGS)` - To apply IPAdapter in SEGS, you need to use the Preprocessor Provider node from the Inspire Pack.
+
+### Mask operation
+ * `Pixelwise(SEGS & SEGS)` - Performs a pixelwise AND operation between two SEGS.
+ * `Pixelwise(SEGS - SEGS)` - Subtracts one SEGS from another.
+ * `Pixelwise(SEGS & MASK)` - Performs a pixelwise AND operation between SEGS and MASK.
+ * `Pixelwise(SEGS & MASKS ForEach)` - Performs a pixelwise AND operation between SEGS and MASKS.
+   * Please note that this operation is performed with batches of MASKS, not just a single MASK.
+ * `Pixelwise(MASK & MASK)` - Performs a pixelwise AND operation between two masks.
+ * `Pixelwise(MASK - MASK)` - Subtracts one mask from another.
+ * `Pixelwise(MASK + MASK)` - Combines two masks.
+ * `SEGM Detector (SEGS)` - Detects segmentation and returns SEGS from the input image.
+ * `BBOX Detector (SEGS)` - Detects bounding boxes and returns SEGS from the input image.
+ * `Dilate Mask` - Dilates a mask.
+   * Supports erosion for negative values.
+ * `Gaussian Blur Mask` - Applies Gaussian blur to a mask. You can utilize this for mask feathering.
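+
+Conceptually, the MASK-level pixelwise operations above are plain element-wise tensor arithmetic. A minimal sketch of that idea, assuming masks are float tensors in [0, 1] as in ComfyUI's MASK convention (the actual nodes additionally handle SEGS crop regions and batching):
+```
+import torch
+
+def mask_and(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
+    # Pixelwise(MASK & MASK): keep only the area present in both masks.
+    return torch.min(a, b)
+
+def mask_sub(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
+    # Pixelwise(MASK - MASK): remove b's area from a.
+    return (a - b).clamp(0.0, 1.0)
+
+def mask_add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
+    # Pixelwise(MASK + MASK): combine two masks.
+    return (a + b).clamp(0.0, 1.0)
+```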
+
+### [Detailer nodes](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/detailers.md)
+ * `Detailer (SEGS)` - Refines the image based on SEGS.
+ * `DetailerDebug (SEGS)` - Refines the image based on SEGS. Additionally, it provides the ability to monitor the cropped image and the refined image of the cropped image.
+   * When using 'external_seed', disable the 'seed random generate' option in the 'Detailer...' node to prevent regeneration caused by a seed that does not change every time.
+ * `MASK to SEGS` - Generates SEGS based on the mask.
+ * `MASK to SEGS For AnimateDiff` - Generates SEGS based on the mask for AnimateDiff.
+   * When using a single mask, convert it to SEGS to apply it to the entire frame.
+   * When using a batch mask, the contour fill feature is disabled.
+ * `MediaPipe FaceMesh to SEGS` - Separates each landmark from the MediaPipe facemesh image to create labeled SEGS.
+   * Usually, the size of images created through the MediaPipe facemesh preprocessor is downscaled. It resizes the MediaPipe facemesh image to the original size given as reference_image_opt for matching sizes during processing.
+ * `ToBinaryMask` - Separates a mask generated with alpha values between 0 and 255 into 0 and 255. The non-zero parts are always set to 255.
+ * `Masks to Mask List` - This node converts MASKS in batch form to a list of individual masks.
+ * `Mask List to Masks` - This node converts a MASK list to MASK batch form.
+ * `EmptySEGS` - Provides an empty SEGS.
+ * `MaskPainter` - Provides a feature to draw masks.
+ * `FaceDetailer` - Easily detects faces and improves them.
+ * `FaceDetailer (pipe)` - Easily detects faces and improves them (for multipass).
+ * `MaskDetailer (pipe)` - This is a simple inpaint node that applies the Detailer to the mask area.
+
+ * `FromDetailer (SDXL/pipe)`, `BasicPipe -> DetailerPipe (SDXL)`, `Edit DetailerPipe (SDXL)` - These are pipe functions used in Detailer for utilizing the refiner model of SDXL.
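+
+The Detailer nodes above share one flow: crop each detected region, enlarge it so the sampler has enough pixels to work with, re-sample it, and paste the result back. A rough, simplified sketch of that loop on `[1, H, W, C]` float image tensors; `resample` stands in for the VAE-encode -> KSampler -> VAE-decode step, and unlike the real Detailer this squares the crop instead of preserving its aspect ratio:
+```
+import torch
+import torch.nn.functional as F
+
+def detail(image: torch.Tensor, regions: list, resample, guide_size: int = 512) -> torch.Tensor:
+    for x1, y1, x2, y2 in regions:                    # one crop_region per detection
+        crop = image[:, y1:y2, x1:x2, :]
+        big = F.interpolate(crop.permute(0, 3, 1, 2),  # enlarge for sampling quality
+                            size=(guide_size, guide_size), mode="bilinear")
+        refined = resample(big)                        # re-generate the enlarged crop
+        small = F.interpolate(refined, size=(y2 - y1, x2 - x1), mode="bilinear")
+        image[:, y1:y2, x1:x2, :] = small.permute(0, 2, 3, 1)
+    return image
+```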
+
+### SEGS Manipulation nodes
+ * `SEGSDetailer` - Performs detailed work on SEGS without pasting it back onto the original image.
+ * `SEGSPaste` - Pastes the results of SEGS onto the original image.
+   * If `ref_image_opt` is present, the images contained within SEGS are ignored. Instead, the image within `ref_image_opt` corresponding to the crop area of SEGS is taken and pasted. The size of the image in `ref_image_opt` should be the same as the original image size.
+   * This node can be used in conjunction with the processing results of AnimateDiff.
+ * `SEGSPreview` - Provides a preview of SEGS.
+   * This option is used to preview the improved image through `SEGSDetailer` before merging it into the original. Prior to going through `SEGSDetailer`, SEGS only contains mask information without image information. If fallback_image_opt is connected to the original image, SEGS without image information will generate a preview using the original image. However, if SEGS already contains image information, fallback_image_opt will be ignored.
+   * This node can be used in conjunction with the processing results of AnimateDiff.
+ * `SEGSPreview (CNET Image)` - Shows images configured with `ControlNetApply (SEGS)` for debugging purposes.
+ * `SEGSToImageList` - Converts SEGS to an Image List.
+ * `SEGSToMaskList` - Converts SEGS to a Mask List.
+ * `SEGS Filter (label)` - This node filters SEGS based on the label of the detected areas.
+ * `SEGS Filter (ordered)` - This node sorts SEGS based on size and position and retrieves SEGs within a certain range.
+ * `SEGS Filter (range)` - This node retrieves only SEGs from SEGS that have a size and position within a certain range.
+ * `SEGS Assign (label)` - Assigns labels sequentially to SEGS. This node is useful when used with `[LAB]` of FaceDetailer.
+ * `SEGSConcat` - Concatenates segs1 and segs2. If the source shapes of segs1 and segs2 differ, segs2 will be ignored.
+ * `Picker (SEGS)` - Among the input SEGS, you can select a specific SEG through a dialog. If no SEG is selected, it outputs an empty SEGS. Increasing the batch_size of SEGSDetailer can be used for the purpose of selecting from the candidates.
+ * `Set Default Image For SEGS` - Sets a default image for SEGS. SEGS with images set this way do not need to have a fallback image set. When override is set to false, the original image is preserved.
+ * `Remove Image from SEGS` - Removes the image set for the SEGS that has been configured by "Set Default Image for SEGS" or SEGSDetailer. When the image for the SEGS is removed, the Detailer node will operate based on the currently processed image instead of the SEGS.
+ * `Make Tile SEGS` - [experimental] Creates SEGS in the form of tiles from an image to facilitate experiments for Tiled Upscale using the Detailer.
+   * The `filter_in_segs_opt` and `filter_out_segs_opt` are optional inputs. If these inputs are provided, when creating the tiles, the mask for each tile is generated by overlapping with the mask of `filter_in_segs_opt` and excluding the overlap with the mask of `filter_out_segs_opt`. Tiles with an empty mask will not be created as SEGS.
+ * `Dilate Mask (SEGS)` - Dilates/erodes the masks in SEGS.
+ * `Gaussian Blur Mask (SEGS)` - Applies Gaussian blur to the masks in SEGS.
+ * `SEGS_ELT Manipulation` - experimental nodes
+   * `DecomposeSEGS` - Decompose SEGS to allow for detailed manipulation.
+   * `AssembleSEGS` - Reassemble the decomposed SEGS.
+   * `From SEG_ELT` - Extract detailed information from SEG_ELT.
+   * `Edit SEG_ELT` - Modify some of the information in SEG_ELT.
+   * `Dilate SEG_ELT` - Dilate the mask of SEG_ELT.
+   * `From SEG_ELT bbox` - Extract coordinates from the bbox in SEG_ELT.
+   * `From SEG_ELT crop_region` - Extract coordinates from the crop_region in SEG_ELT.
+   * `Count Elt in SEGS` - Number of Elts in SEGS.
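+
+For orientation when working with the SEG_ELT nodes above: a SEGS value is essentially the source image shape paired with a list of elements, where each element carries its cropped image, cropped mask, and geometry. An illustrative sketch of the data shape (field names as exposed by `DecomposeSEGS` in recent versions; treat the exact set as version-dependent):
+```
+from collections import namedtuple
+
+# Illustrative only; the concrete field set may differ between versions.
+SEG = namedtuple("SEG", ["cropped_image", "cropped_mask", "confidence",
+                         "crop_region", "bbox", "label", "control_net_wrapper"])
+
+# A SEGS value pairs the (height, width) of the source image with the
+# list of SEG elements; an empty list is what EmptySEGS provides.
+segs = ((512, 768), [])
+```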
+
+### Pipe nodes
+ * `ToDetailerPipe`, `FromDetailerPipe` - These nodes are used to bundle multiple inputs used in the detailer, such as models, vae, etc., into a single DETAILER_PIPE, or extract the elements that are bundled in the DETAILER_PIPE.
+ * `ToBasicPipe`, `FromBasicPipe` - These nodes are used to bundle model, clip, vae, positive conditioning, and negative conditioning into a single BASIC_PIPE, or extract each element from the BASIC_PIPE.
+ * `EditBasicPipe`, `EditDetailerPipe` - These nodes are used to replace some elements in BASIC_PIPE or DETAILER_PIPE.
+ * `FromDetailerPipe_v2`, `FromBasicPipe_v2` - These have the same functionality as `FromDetailerPipe` and `FromBasicPipe`, but with an additional output that directly exports the input pipe. They are useful when editing with EditBasicPipe and EditDetailerPipe.
+* `Latent Scale (on Pixel Space)` - This node converts latent to pixel space, upscales it, and then converts it back to latent.
+  * If upscale_model_opt is provided, it uses the model to upscale the pixels and then downscales them to the target resolution using the interpolation method provided in scale_method.
+* `PixelKSampleUpscalerProvider` - Provides an upscaler that converts latent to pixels using VAEDecode, performs upscaling, converts back to latent using VAEEncode, and then performs k-sampling. This upscaler can be attached to nodes such as `Iterative Upscale` for use.
+  * Similar to `Latent Scale (on Pixel Space)`, if upscale_model_opt is provided, it performs pixel upscaling using the model.
+* `PixelTiledKSampleUpscalerProvider` - It is similar to `PixelKSampleUpscalerProvider`, but it uses `ComfyUI_TiledKSampler` and the Tiled VAE Decoder/Encoder to avoid GPU VRAM issues at high resolutions.
+  * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
+
+### PK_HOOK
+ * `DenoiseScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the denoise toward target_denoise as the iterative steps progress.
+ * `CfgScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the cfg toward target_cfg as the iterative steps progress.
+ * `StepsScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the sampling steps toward target_steps as the iterative steps progress.
+ * `NoiseInjectionHookProvider` - During each iteration of IterativeUpscale, noise is injected into the latent space while varying the strength according to a schedule.
+   * You need to install the [BlenderNeko/ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) node extension.
+   * The seed serves as the initial value required for generating noise, and it increments by 1 with each iteration as the process unfolds.
+   * The source determines the types of CPU noise and GPU noise to be configured.
+   * Currently, there is only a simple schedule available, where the strength of the noise varies from start_strength to end_strength during the progression of each iteration.
+ * `UnsamplerHookProvider` - Applies Unsampler during each iteration. To use this node, ComfyUI_Noise must be installed.
+ * `PixelKSampleHookCombine` - This is used to connect two PK_HOOKs. hook1 is executed first and then hook2 is executed.
+   * If you want to simultaneously change cfg and denoise, you can combine the PK_HOOKs of CfgScheduleHookProvider and DenoiseScheduleHookProvider via PixelKSampleHookCombine.
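+
+All of the schedule hook providers above share one pattern: interpolate a sampling parameter from its starting value toward a target as the iterative steps progress. A minimal sketch of the idea behind DenoiseScheduleHookProvider (a hypothetical class, not the actual PK_HOOK interface):
+```
+# Hypothetical illustration: linearly move denoise toward target_denoise
+# across the steps of an iterative upscale.
+class DenoiseScheduleSketch:
+    def __init__(self, start_denoise: float, target_denoise: float):
+        self.start = start_denoise
+        self.target = target_denoise
+
+    def denoise_for_step(self, step: int, total_steps: int) -> float:
+        t = step / max(total_steps - 1, 1)   # 0.0 on the first step, 1.0 on the last
+        return self.start + (self.target - self.start) * t
+```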
+
+### DETAILER_HOOK
+ * `NoiseInjectionDetailerHookProvider` - The `detailer_hook` is a hook in the `Detailer` that injects noise during the processing of each SEGS.
+ * `UnsamplerDetailerHookProvider` - Applies Unsampler during each cycle. To use this node, ComfyUI_Noise must be installed.
+ * `DenoiseSchedulerDetailerHookProvider` - During the progress of the cycle, the detailer's denoise is altered up to the `target_denoise`.
+ * `CoreMLDetailerHookProvider` - CoreML supports only 512x512, 512x768, 768x512, and 768x768 sampling sizes. CoreMLDetailerHookProvider fixes the upscale of the crop_region precisely to one of these sizes. When using this hook, one of these sizes is always selected, regardless of the guide_size. However, if the guide_size is too small, skipping will occur.
+ * `DetailerHookCombine` - This is used to connect two DETAILER_HOOKs. Similar to PixelKSampleHookCombine.
+ * `SEGSOrderedFilterDetailerHook`, `SEGSRangeFilterDetailerHook`, `SEGSLabelFilterDetailerHook` - These are wrapper nodes that provide the SEGSFilter nodes as a DETAILER_HOOK, to be applied in FaceDetailer or a Detector.
+ * `PreviewDetailerHook` - Connecting this hook node shows a preview whenever a SEGS detailing task is completed. When working with a large number of SEGS, such as with Make Tile SEGS, it allows for monitoring the situation as improvements progress incrementally.
+   * Since this is the hook applied when pasting onto the original image, it has no effect on nodes like `SEGSDetailer`.
+ * `VariationNoiseDetailerHookProvider` - Applies a variation seed to the detailer. It can be applied in multiple stages through combine.
+
+### Iterative Upscale nodes
+ * `Iterative Upscale (Latent/on Pixel Space)` - The upscaler takes the input upscaler and splits the scale_factor into steps, then iteratively performs upscaling.
+ This takes latent as input and outputs latent as the result.
+ * `Iterative Upscale (Image)` - The upscaler takes the input upscaler and splits the scale_factor into steps, then iteratively performs upscaling. This takes an image as input and outputs an image as the result.
+   * Internally, this node uses 'Iterative Upscale (Latent)'.
+
+### TwoSamplers nodes
+* `TwoSamplersForMask` - This node can apply two samplers depending on the mask area. The base_sampler is applied to the area where the mask is 0, while the mask_sampler is applied to the area where the mask is 1.
+  * Note: A latent encoded through VAEEncodeForInpaint cannot be used.
+* `KSamplerProvider` - This is a wrapper that enables KSampler to be used in TwoSamplersForMask and TwoSamplersForMaskUpscalerProvider.
+* `TiledKSamplerProvider` - A wrapper that provides ComfyUI_TiledKSampler as a KSAMPLER.
+  * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
+
+* `TwoAdvancedSamplersForMask` - Similar to TwoSamplersForMask, but they differ in their operation. TwoSamplersForMask performs sampling in the mask area only after all the sampling in the base area is finished. On the other hand, TwoAdvancedSamplersForMask performs sampling in both the base area and the mask area sequentially at each step.
+* `KSamplerAdvancedProvider` - This is a wrapper that enables KSampler to be used in TwoAdvancedSamplersForMask and RegionalSampler.
+  * sigma_factor: By multiplying the denoise schedule by the sigma_factor, you can adjust the amount of denoising based on the configured denoise.
+
+* `TwoSamplersForMaskUpscalerProvider` - This is an Upscaler that extends TwoSamplersForMask to be used in Iterative Upscale.
+  * `TwoSamplersForMaskUpscalerProviderPipe` - pipe version of TwoSamplersForMaskUpscalerProvider.
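+
+Conceptually, `TwoSamplersForMask` composites two sampling results by the mask: the base sampler's output where the mask is 0 and the mask sampler's output where it is 1. A rough torch sketch with hypothetical sampler callables (the mask is assumed to broadcast over the latent's channels):
+```
+import torch
+
+def two_samplers_for_mask(latent, mask, base_sampler, mask_sampler):
+    base = base_sampler(latent)                    # finish the base area first
+    refined = mask_sampler(base, mask=mask)        # then sample the masked area
+    return base * (1.0 - mask) + refined * mask    # composite by the mask
+```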
+
+### Image Utils
+ * `PreviewBridge (image)` - This custom node can be used as a bridge for images when using the MaskEditor feature of Clipspace.
+ * `PreviewBridge (latent)` - This custom node can be used as a bridge for latent images when using the MaskEditor feature of Clipspace.
+   * If a latent with a mask is provided as input, it displays the mask. Additionally, the mask output provides the mask set in the latent.
+   * If a latent without a mask is provided as input, it outputs the original latent as is, but the mask output provides an output with the entire region set as a mask.
+   * When a mask is set through MaskEditor, it is applied to the latent, and the output includes the stored mask. The same mask is also output as the mask output.
+   * When connected to `vae_opt`, it takes higher priority than the `preview_method`.
+ * `ImageSender`, `ImageReceiver` - The images generated in ImageSender are automatically sent to the ImageReceiver with the same link_id.
+ * `LatentSender`, `LatentReceiver` - The latent generated in LatentSender is automatically sent to the LatentReceiver with the same link_id.
+   * Furthermore, LatentSender is implemented with PreviewLatent, which stores the latent in payload form within the image thumbnail.
+   * Due to the current structure of ComfyUI, it is unable to distinguish between SDXL latents and SD1.5/SD2.1 latents. Therefore, it generates thumbnails by decoding them using the SD1.5 method.
+
+### Switch nodes
+ * `Switch (image,mask)`, `Switch (latent)`, `Switch (SEGS)` - Among multiple inputs, it selects the input designated by the selector and outputs it. The first input must be provided, while the others are optional. However, if the input specified by the selector is not connected, an error may occur.
+ * `Switch (Any)` - This is a Switch node that takes an arbitrary number of inputs and produces a single output. Its type is determined when connected to any node, and connecting inputs increases the available slots for connections.
+ * `Inversed Switch (Any)` - In contrast to `Switch (Any)`, it takes a single input and outputs one of many. Due to ComfyUI's functional limitations, the value of `select` must be determined at the time of queuing a prompt, and while it can serve as a `Primitive Node` or `ImpactInt`, it cannot function properly when connected through other nodes.
+ * Guide
+   * When the `Switch (Any)` and `Inversed Switch (Any)` selects are converted into primitives, it's important to be cautious because the select range is not appropriately constrained, potentially leading to unintended behavior.
+   * `Switch (image,mask)`, `Switch (latent)`, `Switch (SEGS)`, and `Switch (Any)` support the `sel_mode` param. The `sel_mode` sets the moment at which the `select` parameter is determined. `select_on_prompt` determines the `select` at the time of queuing the prompt, while `select_on_execution` determines it during the execution of the workflow. While `select_on_execution` offers more flexibility, it can potentially trigger workflow execution errors due to running nodes that may be impossible to execute within the limitations of ComfyUI. `select_on_prompt` bypasses this constraint by treating any inputs not selected as if they were disconnected. However, please note that when using `select_on_prompt`, the `select` can only be used with widgets or `Primitive Nodes` determined at the queue prompt.
+   * There is an issue when connecting the built-in reroute node with the switch's input/output slots: it can lead to forced disconnections during workflow loading. Therefore, it is advisable not to use reroute for making these connections. However, there are no issues when using the reroute node from Pythongossss.
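+
+At its core, a switch of this kind just passes through the input chosen by `select`. A stripped-down sketch of such a node in ComfyUI's custom-node style (hypothetical, and far simpler than the actual implementation, which handles arbitrary types, growing slots, and `sel_mode`):
+```
+class SwitchSketch:
+    """Hypothetical two-way image switch."""
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {"select": ("INT", {"default": 1, "min": 1, "max": 2}),
+                             "input1": ("IMAGE",)},
+                "optional": {"input2": ("IMAGE",)}}
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "doit"
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, select, input1, input2=None):
+        # Fall back to the first input if the selected slot is not connected.
+        return (input1 if select == 1 or input2 is None else input2,)
+```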
+
+### [Wildcards](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md) nodes
+ * These are nodes that support syntax in the form of `__wildcard-name__` and dynamic prompt syntax like `{a|b|c}` (a toy sketch of this expansion appears below).
+ * Wildcard files can be used by placing `.txt` or `.yaml` files under either the `ComfyUI-Impact-Pack/wildcards` or `ComfyUI-Impact-Pack/custom_wildcards` paths.
+   * You can download and use [Wildcard YAML](https://civitai.com/models/138970/billions-of-wildcards-all-in-one) files in this format.
+ * After the first execution, you can change the custom wildcards path in the `custom_wildcards` entry within the `ComfyUI-Impact-Pack/impact-pack.ini` file created.
+ * `ImpactWildcardProcessor` - The text is generated by processing the wildcards in the Text. If the mode is set to "populate", a dynamic prompt is generated with each execution and the input is filled in the second textbox. If the mode is set to "fixed", the content of the second textbox remains unchanged.
+   * When an image is generated with the "fixed" mode, the prompt used for that particular generation is stored in the metadata.
+ * `ImpactWildcardEncode` - Similar to ImpactWildcardProcessor, this provides the loading functionality of LoRAs (e.g. `<lora:some_lora:0.8>`). Populated prompts are encoded using the clip after all the lora loading is done.
+   * If the `Inspire Pack` is installed, you can use **Lora Block Weight** in the form of `LBW=lbw spec;` within the lora syntax.
+
+### Regional Sampling
+ * These nodes offer the capability to divide regions and perform partial sampling using a mask. Unlike TwoSamplersForMask, sampling for each region is applied during each step.
+ * `RegionalPrompt` - This node combines a **mask** for specifying regions and the **sampler** to apply to each region to create `REGIONAL_PROMPTS`.
+ * `CombineRegionalPrompts` - Combines multiple `REGIONAL_PROMPTS` into a single `REGIONAL_PROMPTS`.
+ * `RegionalSampler` - This node performs sampling using a base sampler and regional prompts. Sampling by the base sampler is executed at each step, while sampling for each region is performed through the sampler bound to each region.
+   * overlap_factor - Specifies the amount of overlap for each region to blend well with the area outside the mask.
+   * restore_latent - When sampling each region, restores the areas outside the mask to the base latent, preventing additional noise from being introduced outside the mask during region sampling.
+ * `RegionalSamplerAdvanced` - This is the Advanced version of the RegionalSampler. You can control it using `step` instead of `denoise`.
+ > NOTE: The `sde` sampler and `uni_pc` sampler introduce additional noise during each step of the sampling process. To mitigate this, when sampling each region, the `uni_pc` sampler additionally applies `dpmpp_fast`, and the `sde` sampler applies `dpmpp_2m` as an additional measure.
+
+
+### Impact KSampler
+ * These samplers support basic_pipe and the AYS scheduler.
+ * `KSampler (pipe)` - pipe version of KSampler
+ * `KSampler (advanced/pipe)` - pipe version of KSamplerAdvanced
+   * When converting the scheduler widget to input, refer to the `Impact Scheduler Adapter` node to resolve compatibility issues.
+ * `GITSScheduler Func Provider` - Provides a scheduler function for GITSScheduler.
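+
+As a toy sketch of the wildcard syntax described above: `{a|b|c}` picks one alternative at random, and `__name__` is replaced with a random line from the wildcard of that name. This illustrative re-implementation ignores everything the real processor also supports (nesting, selection weights, YAML wildcards, lora loading):
+```
+import random
+import re
+
+wildcards = {"hair": ["blonde hair", "black hair", "red hair"]}  # stand-in for wildcard files
+
+def expand(prompt: str) -> str:
+    # {a|b|c} -> one option chosen at random
+    prompt = re.sub(r"\{([^{}]+)\}",
+                    lambda m: random.choice(m.group(1).split("|")), prompt)
+    # __name__ -> a random line from the wildcard of that name
+    prompt = re.sub(r"__([\w-]+)__",
+                    lambda m: random.choice(wildcards.get(m.group(1), [m.group(0)])), prompt)
+    return prompt
+
+print(expand("1girl, __hair__, {smile|laughing|serious}"))
+```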
+
+
+### Batch/List Util
+ * `Image batch To Image List` - Converts an Image batch to an Image List.
+   - You can use this to handle images generated as a multi-batch individually; a rough tensor-level sketch follows the Logics section below.
+ * `Make Image List` - Converts multiple images into a single image list.
+ * `Make Image Batch` - Converts multiple images into a single image batch.
+   - The number of image inputs can be scaled up as needed.
+
+
+### Logics (experimental)
+ * These are experimental nodes designed to implement logic for loops and dynamic switching.
+ * `ImpactCompare`, `ImpactConditionalBranch`, `ImpactConditionalBranchSelMode`, `ImpactInt`, `ImpactValueSender`, `ImpactValueReceiver`, `ImpactImageInfo`, `ImpactMinMax`, `ImpactNeg`, `ImpactConditionalStopIteration`
+ * `ImpactIsNotEmptySEGS` - This node returns `true` only if the input SEGS is not empty.
+ * `ImpactIfNone` - Returns `true` if any_input is None, and returns `false` if it is not None.
+ * `Queue Trigger` - When this node is executed, it adds a new queue to assist with repetitive tasks. It will only execute if the signal's status changes.
+ * `Queue Trigger (Countdown)` - Like the Queue Trigger, it adds a queue, but only if the count is greater than 1, and it decrements the count by one each time it runs.
+ * `Sleep` - Waits for the specified time (in seconds).
+ * `Set Widget Value` - This node sets one of the optional inputs to the specified node's widget. An error may occur if the types do not match.
+ * `Set Mute State` - This node changes the mute state of a specific node.
+ * `Control Bridge` - This node modifies the state of the connected control nodes based on the `mode` and `behavior`. If there are nodes that require a change, the current execution is paused, the mute status is updated, and a new prompt queue is inserted.
+   * When the `mode` is `active`, it makes the connected control nodes active regardless of the behavior.
+   * When the `mode` is `Bypass/Mute`, it changes the state of the connected nodes based on whether the behavior is `Bypass` or `Mute`.
+   * **Limitation**: Due to these characteristics, it does not function correctly when the batch count exceeds 1. Additionally, it does not guarantee proper operation when the seed is randomized or when the state of nodes is altered by actions such as `Queue Trigger`, `Set Widget Value`, or `Set Mute` before the Control Bridge.
+     * When utilizing this node, please structure the workflow in such a way that `Queue Trigger`, `Set Widget Value`, `Set Mute State`, and similar actions are executed at the end of the workflow.
+     * If you want to change the value of the seed at each iteration, please ensure that Set Widget Value is executed at the end of the workflow instead of using randomization.
+     * It is not a problem if the seed changes due to randomization as long as it occurs after the Control Bridge section.
+ * `Remote Boolean (on prompt)`, `Remote Int (on prompt)` - At the start of the prompt, these nodes forcibly set the `widget_value` of `node_id`. They are disregarded if the target widget type is different.
+   * You can find the `node_id` by checking through [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) using the format `Badge: #ID Nickname`.
+ * An experimental set of nodes for implementing loop functionality (tutorial to be prepared later / [example workflow](test/loop-test.json)).
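+
+Under the hood, the Batch/List Util conversions above come down to plain tensor operations on ComfyUI's `[batch, height, width, channels]` IMAGE tensors. Roughly, for same-sized images (the actual nodes also rescale mismatched sizes):
+```
+import torch
+
+def image_batch_to_list(batch: torch.Tensor) -> list:
+    # One [1, H, W, C] tensor per image in the batch.
+    return [img.unsqueeze(0) for img in batch]
+
+def image_list_to_batch(images: list) -> torch.Tensor:
+    # Requires matching H/W/C across the list.
+    return torch.cat(images, dim=0)
+```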
+
+### HuggingFace nodes
+ * These nodes provide functionalities based on HuggingFace repository models.
+ * The path where the HuggingFace model cache is stored can be changed through the `HF_HOME` environment variable.
+ * `HF Transformers Classifier Provider` - This is a node that provides a classifier based on HuggingFace's transformers models.
+   * The 'repo id' parameter should contain HuggingFace's repo id. When `preset_repo_id` is set to `Manual repo id`, the manually entered repo id in `manual_repo_id` is used.
+   * e.g. 'rizvandwiki/gender-classification-2' is a repository that provides a model for gender classification.
+ * `SEGS Classify` - This node utilizes the `TRANSFORMERS_CLASSIFIER` loaded with 'HF Transformers Classifier Provider' to classify `SEGS`.
+   * The 'expr' allows for forms like `label > number`, and in the case of `preset_expr` being `Manual expr`, it uses the expression entered in `manual_expr`.
+   * For example, in the case of `male <= 0.4`, if the score of the `male` label in the classification result is less than or equal to 0.4, it is categorized as `filtered_SEGS`; otherwise, it is categorized as `remained_SEGS`.
+   * For supported labels, please refer to the `config.json` of the respective HuggingFace repository.
+   * `#Female` and `#Male` are symbols that group multiple labels such as `Female, women, woman, ...` for convenience, rather than being single labels.
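+
+At its core, the classification path boils down to running a `transformers` image-classification pipeline on each cropped SEG image and applying the `expr` threshold to the scores. A minimal standalone sketch using the example repo id above (label and threshold mirror the `male <= 0.4` example):
+```
+from transformers import pipeline
+from PIL import Image
+
+classifier = pipeline("image-classification", model="rizvandwiki/gender-classification-2")
+
+def is_filtered(crop: Image.Image, label: str = "male", threshold: float = 0.4) -> bool:
+    # Mirrors an expr like `male <= 0.4`: True routes the SEG to filtered_SEGS.
+    scores = {r["label"]: r["score"] for r in classifier(crop)}
+    return scores.get(label, 0.0) <= threshold
+```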
+
+### Etc nodes
+ * `Impact Scheduler Adapter` - With the addition of AYS to the scheduler of the Impact Pack and Inspire Pack, there is an incompatibility when the existing scheduler widget is converted to input. The Impact Scheduler Adapter enables an indirect connection in this case.
+ * `StringListToString` - Converts a String List to a String.
+ * `WildcardPromptFromString` - Creates a labeled wildcard for the detailer from a string.
+   * This node works well when used with MakeTileSEGS. [[Link](https://github.com/ltdrdata/ComfyUI-Impact-Pack/pull/536#discussion_r1586060779)]
+
+ * `String Selector` - It selects and returns a portion of the string. When `multiline` mode is disabled, it simply returns the string of the line pointed to by the selector. When `multiline` mode is enabled, it divides the string based on lines that start with `#` and returns them. If the `select` value is larger than the number of items, it will start counting from the first line again and return accordingly.
+ * `Combine Conditionings` - It takes multiple conditionings as input and combines them into a single conditioning.
+ * `Concat Conditionings` - It takes multiple conditionings as input and concatenates them into a single conditioning.
+ * `Negative Cond Placeholder` - Models like FLUX.1 do not use Negative Conditioning. This is a placeholder node for them. You can use FLUX.1 by replacing the Negative Conditioning used in Impact KSampler, KSampler (Inspire), and the Detailers with this node.
+
+
+## MMDet nodes (DEPRECATED) - Don't use these nodes
+* MMDetDetectorProvider - Loads the MMDet model to provide BBOX_DETECTOR and SEGM_DETECTOR.
+* To use the existing MMDetDetectorProvider, you need to enable the MMDet usage configuration.
+
+
+## Feature
+* `Interactive SAM Detector (Clipspace)` - When you right-click on a node that has 'MASK' and 'IMAGE' outputs, a context menu will open. From this menu, you can either open a dialog to create a SAM Mask using 'Open in SAM Detector', or copy the content (likely mask data) using 'Copy (Clipspace)' and generate a mask using 'Impact SAM Detector' from the clipspace menu, and then paste it using 'Paste (Clipspace)'.
+* Detects errors that occur when models and clips from different checkpoints, such as `SDXL Base`, `SDXL Refiner`, `SD1.x`, and `SD2.x`, are mixed during sampling, and reports an appropriate error.
+
+
+## Deprecated
+* The following nodes have been kept only for compatibility with existing workflows, and are no longer supported. Please replace them with new nodes.
+ * ONNX Detector (SEGS) -> BBOX Detector (SEGS)
+ * MMDetLoader -> MMDetDetectorProvider
+ * SegsMaskCombine -> SEGS to MASK (combined)
+ * BboxDetectorForEach -> BBOX Detector (SEGS)
+ * SegmDetectorForEach -> SEGM Detector (SEGS)
+ * BboxDetectorCombined -> BBOX Detector (combined)
+ * SegmDetectorCombined -> SEGM Detector (combined)
+ * MaskPainter -> PreviewBridge
+* To use the existing deprecated legacy nodes, you need to enable the MMDet usage configuration.
+
+
+## Ultralytics models
+* huggingface.co/Bingsu/[adetailer](https://huggingface.co/Bingsu/adetailer) - You can download face and people detection models, as well as clothing detection models.
+* ultralytics/[assets](https://github.com/ultralytics/assets/releases/) - You can download various types of detection models other than faces or people.
+* civitai/[adetailer](https://civitai.com/search/models?sortBy=models_v5&query=adetailer) - You can download various types of detection models. Many models are associated with NSFW content.
+
+## How to activate 'MMDet usage' (DEPRECATED)
+* Upon the initial execution, an `impact-pack.ini` file will be generated in the custom_nodes/ComfyUI-Impact-Pack directory.
+```
+[default]
+dependency_version = 2
+mmdet_skip = True
+```
+* Change `mmdet_skip = True` to `mmdet_skip = False`
+```
+[default]
+dependency_version = 2
+mmdet_skip = False
+```
+* Restart ComfyUI
+
+
+## Installation
+
+1. `cd custom_nodes`
+2. `git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack.git`
+3. `cd ComfyUI-Impact-Pack`
+4. (optional) `git clone https://github.com/ltdrdata/ComfyUI-Impact-Subpack impact_subpack`
+   * Impact Pack will automatically download the subpack during its initial launch.
+5. (optional) `python install.py`
+   * Impact Pack will automatically install its dependencies during its initial launch.
+   * For the portable version, you should execute the command `..\..\..\python_embeded\python.exe install.py` to run the installation script.
+
+
+6. Restart ComfyUI
+
+* NOTE1: If an error occurs during the installation process, please refer to the [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md) for assistance.
+* NOTE2: You can use this [colab notebook](https://colab.research.google.com/github/ltdrdata/ComfyUI-Impact-Pack/blob/Main/notebook/comfyui_colab_impact_pack.ipynb) to launch it. This notebook automatically downloads the Impact Pack to the custom_nodes directory, installs the tested dependencies, and runs it.
+* NOTE3: If you create an empty file named `skip_download_model` in the `ComfyUI/custom_nodes/` directory, it will skip the model download step during the installation of the Impact Pack.
+
+## Package Dependencies (if you need to set up manually)
+
+* pip install
+  * openmim
+  * segment-anything
+  * ultralytics
+  * scikit-image
+  * piexif
+  * (optional) pycocotools
+  * (optional) onnxruntime
+
+* mim install (deprecated)
+  * mmcv==2.0.0, mmdet==3.0.0, mmengine==0.7.2
+
+* linux packages (ubuntu)
+  * libgl1-mesa-glx
+  * libglib2.0-0
+
+
+## Config example
+* Once you run the Impact Pack for the first time, an `impact-pack.ini` file will be automatically generated in the Impact Pack directory. You can modify this configuration file to customize the default behavior.
+  * `dependency_version` - don't touch this
+  * `mmdet_skip` - disables MMDet-based nodes and legacy nodes if `True`
+  * `sam_editor_cpu` - use the cpu for the `SAM editor` instead of the gpu
+  * `sam_editor_model` - specify the SAM model for the `SAM editor`
+    * You can download various SAM models using ComfyUI-Manager.
+    * Path to SAM models: `ComfyUI/models/sams`
+```
+[default]
+dependency_version = 9
+mmdet_skip = True
+sam_editor_cpu = False
+sam_editor_model = sam_vit_b_01ec64.pth
+```
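+
+Since `impact-pack.ini` is a standard INI file, it can also be inspected or edited programmatically with Python's `configparser` (a convenience sketch, not part of the pack; the path assumes the default install location):
+```
+import configparser
+
+ini_path = "ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini"
+
+config = configparser.ConfigParser()
+config.read(ini_path)
+
+print(config["default"]["sam_editor_model"])   # e.g. sam_vit_b_01ec64.pth
+config["default"]["sam_editor_cpu"] = "True"   # switch the SAM editor to CPU
+
+with open(ini_path, "w") as f:
+    config.write(f)
+```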
+
+
+## Other Materials (auto-download on initial startup)
+
+* ComfyUI/models/mmdets/bbox <= https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth
+* ComfyUI/models/mmdets/bbox <= https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_anime-face_yolov3.py
+* ComfyUI/models/sams <= https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+
+## Troubleshooting page
+* [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md)
+
+
+## How to use (DDetailer feature)
+
+#### 1. Basic auto face detection and refine example.
+![simple](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple.png)
+* The face that has been damaged due to low resolution is regenerated at high resolution and composited back in, restoring its details.
+* The FaceDetailer node is a combination of a Detector node for face detection and a Detailer node for image enhancement. See the [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/tutorial/advanced.md) for a more detailed explanation.
+* Pass the MMDetLoader's bbox model and the detection model loaded by SAMLoader to FaceDetailer. Since it performs the function of KSampler for image enhancement, its options overlap with KSampler's.
+* The MASK output of FaceDetailer provides a visualization of where the detected and enhanced areas are.
+
+![simple-orig](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple-original.png) ![simple-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple-refined.png)
+* You can see that the face in the image on the left has increased detail, as in the image on the right.
+
+#### 2. 2Pass refine (restore a severely damaged face)
+![2pass-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-simple.png)
+* Although two FaceDetailers can be attached together for a 2-pass configuration, the various common inputs used in KSampler can be passed through DETAILER_PIPE, so FaceDetailerPipe can be used for an easier configuration.
+* In the first pass, only rough outline recovery is required, so restore at a reasonable resolution with low settings. However, if you increase the dilation at this point, not only the face but also the surrounding area is included in the recovery range, which is useful when you need to reshape areas beyond the face itself.
+
+![2pass-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-original.png) ![2pass-example-middle](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-1pass.png) ![2pass-example-result](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-2pass.png)
+* In the first stage, the severely damaged face is restored to some extent, and in the second stage, the details are restored.
+
+#### 3. Face Bbox(bounding box) + Person silhouette segmentation (prevent distortion of the background.)
+![combination-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination.jpg)
+![combination-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination-original.png) ![combination-example-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination-refined.png)
+
+* Facial synthesis that emphasizes details is delicately aligned with the contours of the face, and it can be observed that it does not affect the image outside of the face.
+
+* The BBoxDetectorForEach node is used to detect faces, and the SAMDetectorCombined node is used to find the segment related to the detected face. By using the Segs & Mask node with the two masks obtained in this way, an accurate mask that intersects based on segs can be generated. If this generated mask is input to the DetailerForEach node, only the target area can be created in high resolution from the image and then composited.
+
+#### 4. Iterative Upscale
+![upscale-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-workflow.png)
+
+* The IterativeUpscale node enlarges an image/latent by a scale_factor. In this process, the upscale is carried out progressively by dividing it into steps.
+* IterativeUpscale takes an Upscaler as an input, similar to a plugin, and uses it during each iteration. PixelKSampleUpscalerProvider is an Upscaler that converts the latent representation to pixel space and applies ksampling.
+  * The upscale_model_opt is an optional parameter that determines whether to use the upscale function of the model base if available. Using the upscale function of the model base can significantly reduce the number of iterative steps required. If an x2 upscaler is used, the image/latent is first upscaled by a factor of 2 and then downscaled to the target scale at each step before further processing is done.
+
+* The following image is an image of 304x512 pixels and the same image scaled up to three times its original size using IterativeUpscale.
+
+![combination-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-original.png) ![combination-example-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-3x.png)
+
+
+#### 5. Interactive SAM Detector (Clipspace)
+
+* When you right-click on a node that outputs 'MASK' and 'IMAGE', a menu called "Open in SAM Detector" appears, as shown in the following picture. Clicking on the menu opens a dialog with SAM's functionality, allowing you to generate a segment mask.
+![samdetector-menu](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-menu.png)
+
+* Clicking the left mouse button on a coordinate enters a positive prompt in blue, indicating an area that should be included. Clicking the right mouse button on a coordinate enters a negative prompt in red, indicating an area that should be excluded.
+* You can remove points that were added by using the "undo" button. After selecting the points, pressing the "detect" button generates the mask. Additionally, you can adjust the fidelity slider to determine the extent to which the mask belongs to the confidence region.
+
+![samdetector-dialog](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-dialog.jpg)
+
+* If you opened the dialog through "Open in SAM Detector" from the node, you can directly apply the changes by clicking the "Save to node" button. However, if you opened the dialog through the "clipspace" menu, you can save it to clipspace by clicking the "Save" button.
+
+![samdetector-result](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-result.jpg)
+
+* When you execute using the reflected mask in the node, you can observe that the image and mask are displayed separately.
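+
+The dialog's point prompts map directly onto the `segment-anything` predictor API: positive clicks become label-1 points and negative clicks become label-0 points. A minimal sketch using the auto-downloaded `sam_vit_b_01ec64.pth` checkpoint (file paths and coordinates are illustrative):
+```
+import numpy as np
+from PIL import Image
+from segment_anything import sam_model_registry, SamPredictor
+
+sam = sam_model_registry["vit_b"](checkpoint="ComfyUI/models/sams/sam_vit_b_01ec64.pth")
+predictor = SamPredictor(sam)
+
+image = np.asarray(Image.open("input.png").convert("RGB"))  # illustrative input image
+predictor.set_image(image)
+
+masks, scores, _ = predictor.predict(
+    point_coords=np.array([[320, 240], [500, 80]]),  # clicked coordinates
+    point_labels=np.array([1, 0]),                   # 1 = include, 0 = exclude
+    multimask_output=True,
+)
+best_mask = masks[np.argmax(scores)]                 # highest-scoring candidate
+```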
+
+
+## Other Tutorials
+* [ComfyUI-extension-tutorials/ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-extension-tutorials/tree/Main/ComfyUI-Impact-Pack) - You can find various tutorials and workflows on this page.
+* [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/advanced.md)
+* [SAM Application](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam.md)
+* [PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/previewbridge.md)
+* [Mask Pointer](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/maskpointer.md)
+* [ONNX Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ONNX.md)
+* [CLIPSeg Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/clipseg.md)
+* [Extreme Highresolution Upscale](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/extreme-upscale.md)
+* [TwoSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplers.md)
+* [TwoAdvancedSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoAdvancedSamplers.md)
+* [Advanced Iterative Upscale: PK_HOOK](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/pk_hook.md)
+* [Advanced Iterative Upscale: TwoSamplersForMask Upscale Provider](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplersUpscale.md)
+* [Interactive SAM + PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam_with_preview_bridge.md)
+* [ImageSender/ImageReceiver/LatentSender/LatentReceiver](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sender_receiver.md)
+* [ImpactWildcardProcessor](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md)
+
+
+## Credits
+
+ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI.
+
+dustysys/[ddetailer](https://github.com/dustysys/ddetailer) - DDetailer for Stable-diffusion-webUI extension.
+
+Bing-su/[dddetailer](https://github.com/Bing-su/dddetailer) - The anime-face-detector used in ddetailer has been updated to be compatible with mmdet 3.0.0, and a patch has also been applied to the pycocotools dependency for the Windows environment in ddetailer.
+
+facebook/[segment-anything](https://github.com/facebookresearch/segment-anything) - Segmentation Anything!
+
+hysts/[anime-face-detector](https://github.com/hysts/anime-face-detector) - Creator of `anime-face_yolov3`, which has impressive performance on a variety of art styles.
+
+open-mmlab/[mmdetection](https://github.com/open-mmlab/mmdetection) - Object detection toolset. `dd-person_mask2former` was trained via transfer learning using their [R-50 Mask2Former instance segmentation model](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former#instance-segmentation) as a base.
+
+biegert/[ComfyUI-CLIPSeg](https://github.com/biegert/ComfyUI-CLIPSeg) - This is a custom node that enables the use of CLIPSeg technology, which can find segments through prompts, in ComfyUI.
+
+BlenderNeko/[ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) - The tile sampler allows high-resolution sampling even on GPUs with low VRAM.
+
+BlenderNeko/[ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) - The noise injection feature relies on this extension's functions, including its slerp code for noise variation.
+
+WASasquatch/[was-node-suite-comfyui](https://github.com/WASasquatch/was-node-suite-comfyui) - A powerful custom node extension for ComfyUI.
+
+Trung0246/[ComfyUI-0246](https://github.com/Trung0246/ComfyUI-0246) - Nice bypass hack!
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__init__.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..db18e48744148a4faa30ca02b50a564de6385f69
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__init__.py
@@ -0,0 +1,497 @@
+"""
+@author: Dr.Lt.Data
+@title: Impact Pack
+@nickname: Impact Pack
+@description: This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details, and also provides an iterative upscaler.
+""" + +import shutil +import folder_paths +import os +import sys +import traceback + +comfy_path = os.path.dirname(folder_paths.__file__) +impact_path = os.path.join(os.path.dirname(__file__)) +subpack_path = os.path.join(os.path.dirname(__file__), "impact_subpack") +modules_path = os.path.join(os.path.dirname(__file__), "modules") + +sys.path.append(modules_path) + +import impact.config +import impact.sample_error_enhancer +print(f"### Loading: ComfyUI-Impact-Pack ({impact.config.version})") + + +def do_install(): + import importlib + spec = importlib.util.spec_from_file_location('impact_install', os.path.join(os.path.dirname(__file__), 'install.py')) + impact_install = importlib.util.module_from_spec(spec) + spec.loader.exec_module(impact_install) + + +# ensure dependency +if not os.path.exists(os.path.join(subpack_path, ".git")) and os.path.exists(subpack_path): + print(f"### CompfyUI-Impact-Pack: corrupted subpack detected.") + shutil.rmtree(subpack_path) + +if impact.config.get_config()['dependency_version'] < impact.config.dependency_version or not os.path.exists(subpack_path): + print(f"### ComfyUI-Impact-Pack: Updating dependencies [{impact.config.get_config()['dependency_version']} -> {impact.config.dependency_version}]") + do_install() + +sys.path.append(subpack_path) + +# Core +# recheck dependencies for colab +try: + import impact.subpack_nodes # This import must be done before cv2. + + import folder_paths + import torch + import cv2 + from cv2 import setNumThreads + import numpy as np + import comfy.samplers + import comfy.sd + import warnings + from PIL import Image, ImageFilter + from skimage.measure import label, regionprops + from collections import namedtuple + import piexif + + if not impact.config.get_config()['mmdet_skip']: + import mmcv + from mmdet.apis import (inference_detector, init_detector) + from mmdet.evaluation import get_classes +except: + import importlib + print("### ComfyUI-Impact-Pack: Reinstall dependencies (several dependencies are missing.)") + do_install() + + +import impact.impact_server # to load server api + +from .modules.impact.impact_pack import * +from .modules.impact.detectors import * +from .modules.impact.pipe import * +from .modules.impact.logics import * +from .modules.impact.util_nodes import * +from .modules.impact.segs_nodes import * +from .modules.impact.special_samplers import * +from .modules.impact.hf_nodes import * +from .modules.impact.bridge_nodes import * +from .modules.impact.hook_nodes import * +from .modules.impact.animatediff_nodes import * +from .modules.impact.segs_upscaler import * + +import threading + + +threading.Thread(target=impact.wildcards.wildcard_load).start() + + +NODE_CLASS_MAPPINGS = { + "SAMLoader": SAMLoader, + "CLIPSegDetectorProvider": CLIPSegDetectorProvider, + "ONNXDetectorProvider": ONNXDetectorProvider, + + "BitwiseAndMaskForEach": BitwiseAndMaskForEach, + "SubtractMaskForEach": SubtractMaskForEach, + + "DetailerForEach": DetailerForEach, + "DetailerForEachDebug": DetailerForEachTest, + "DetailerForEachPipe": DetailerForEachPipe, + "DetailerForEachDebugPipe": DetailerForEachTestPipe, + "DetailerForEachPipeForAnimateDiff": DetailerForEachPipeForAnimateDiff, + + "SAMDetectorCombined": SAMDetectorCombined, + "SAMDetectorSegmented": SAMDetectorSegmented, + + "FaceDetailer": FaceDetailer, + "FaceDetailerPipe": FaceDetailerPipe, + "MaskDetailerPipe": MaskDetailerPipe, + + "ToDetailerPipe": ToDetailerPipe, + "ToDetailerPipeSDXL": ToDetailerPipeSDXL, + "FromDetailerPipe": FromDetailerPipe, + 
"FromDetailerPipe_v2": FromDetailerPipe_v2, + "FromDetailerPipeSDXL": FromDetailerPipe_SDXL, + "ToBasicPipe": ToBasicPipe, + "FromBasicPipe": FromBasicPipe, + "FromBasicPipe_v2": FromBasicPipe_v2, + "BasicPipeToDetailerPipe": BasicPipeToDetailerPipe, + "BasicPipeToDetailerPipeSDXL": BasicPipeToDetailerPipeSDXL, + "DetailerPipeToBasicPipe": DetailerPipeToBasicPipe, + "EditBasicPipe": EditBasicPipe, + "EditDetailerPipe": EditDetailerPipe, + "EditDetailerPipeSDXL": EditDetailerPipeSDXL, + + "LatentPixelScale": LatentPixelScale, + "PixelKSampleUpscalerProvider": PixelKSampleUpscalerProvider, + "PixelKSampleUpscalerProviderPipe": PixelKSampleUpscalerProviderPipe, + "IterativeLatentUpscale": IterativeLatentUpscale, + "IterativeImageUpscale": IterativeImageUpscale, + "PixelTiledKSampleUpscalerProvider": PixelTiledKSampleUpscalerProvider, + "PixelTiledKSampleUpscalerProviderPipe": PixelTiledKSampleUpscalerProviderPipe, + "TwoSamplersForMaskUpscalerProvider": TwoSamplersForMaskUpscalerProvider, + "TwoSamplersForMaskUpscalerProviderPipe": TwoSamplersForMaskUpscalerProviderPipe, + + "PixelKSampleHookCombine": PixelKSampleHookCombine, + "DenoiseScheduleHookProvider": DenoiseScheduleHookProvider, + "StepsScheduleHookProvider": StepsScheduleHookProvider, + "CfgScheduleHookProvider": CfgScheduleHookProvider, + "NoiseInjectionHookProvider": NoiseInjectionHookProvider, + "UnsamplerHookProvider": UnsamplerHookProvider, + "CoreMLDetailerHookProvider": CoreMLDetailerHookProvider, + "PreviewDetailerHookProvider": PreviewDetailerHookProvider, + + "DetailerHookCombine": DetailerHookCombine, + "NoiseInjectionDetailerHookProvider": NoiseInjectionDetailerHookProvider, + "UnsamplerDetailerHookProvider": UnsamplerDetailerHookProvider, + "DenoiseSchedulerDetailerHookProvider": DenoiseSchedulerDetailerHookProvider, + "SEGSOrderedFilterDetailerHookProvider": SEGSOrderedFilterDetailerHookProvider, + "SEGSRangeFilterDetailerHookProvider": SEGSRangeFilterDetailerHookProvider, + "SEGSLabelFilterDetailerHookProvider": SEGSLabelFilterDetailerHookProvider, + "VariationNoiseDetailerHookProvider": VariationNoiseDetailerHookProvider, + # "CustomNoiseDetailerHookProvider": CustomNoiseDetailerHookProvider, + + "BitwiseAndMask": BitwiseAndMask, + "SubtractMask": SubtractMask, + "AddMask": AddMask, + "ImpactSegsAndMask": SegsBitwiseAndMask, + "ImpactSegsAndMaskForEach": SegsBitwiseAndMaskForEach, + "EmptySegs": EmptySEGS, + + "MediaPipeFaceMeshToSEGS": MediaPipeFaceMeshToSEGS, + "MaskToSEGS": MaskToSEGS, + "MaskToSEGS_for_AnimateDiff": MaskToSEGS_for_AnimateDiff, + "ToBinaryMask": ToBinaryMask, + "MasksToMaskList": MasksToMaskList, + "MaskListToMaskBatch": MaskListToMaskBatch, + "ImageListToImageBatch": ImageListToImageBatch, + "SetDefaultImageForSEGS": DefaultImageForSEGS, + "RemoveImageFromSEGS": RemoveImageFromSEGS, + + "BboxDetectorSEGS": BboxDetectorForEach, + "SegmDetectorSEGS": SegmDetectorForEach, + "ONNXDetectorSEGS": BboxDetectorForEach, + "ImpactSimpleDetectorSEGS_for_AD": SimpleDetectorForAnimateDiff, + "ImpactSimpleDetectorSEGS": SimpleDetectorForEach, + "ImpactSimpleDetectorSEGSPipe": SimpleDetectorForEachPipe, + "ImpactControlNetApplySEGS": ControlNetApplySEGS, + "ImpactControlNetApplyAdvancedSEGS": ControlNetApplyAdvancedSEGS, + "ImpactControlNetClearSEGS": ControlNetClearSEGS, + "ImpactIPAdapterApplySEGS": IPAdapterApplySEGS, + + "ImpactDecomposeSEGS": DecomposeSEGS, + "ImpactAssembleSEGS": AssembleSEGS, + "ImpactFrom_SEG_ELT": From_SEG_ELT, + "ImpactEdit_SEG_ELT": Edit_SEG_ELT, + "ImpactDilate_Mask_SEG_ELT": 
Dilate_SEG_ELT, + "ImpactDilateMask": DilateMask, + "ImpactGaussianBlurMask": GaussianBlurMask, + "ImpactDilateMaskInSEGS": DilateMaskInSEGS, + "ImpactGaussianBlurMaskInSEGS": GaussianBlurMaskInSEGS, + "ImpactScaleBy_BBOX_SEG_ELT": SEG_ELT_BBOX_ScaleBy, + "ImpactFrom_SEG_ELT_bbox": From_SEG_ELT_bbox, + "ImpactFrom_SEG_ELT_crop_region": From_SEG_ELT_crop_region, + "ImpactCount_Elts_in_SEGS": Count_Elts_in_SEGS, + + "BboxDetectorCombined_v2": BboxDetectorCombined, + "SegmDetectorCombined_v2": SegmDetectorCombined, + "SegsToCombinedMask": SegsToCombinedMask, + + "KSamplerProvider": KSamplerProvider, + "TwoSamplersForMask": TwoSamplersForMask, + "TiledKSamplerProvider": TiledKSamplerProvider, + + "KSamplerAdvancedProvider": KSamplerAdvancedProvider, + "TwoAdvancedSamplersForMask": TwoAdvancedSamplersForMask, + + "ImpactNegativeConditioningPlaceholder": NegativeConditioningPlaceholder, + + "PreviewBridge": PreviewBridge, + "PreviewBridgeLatent": PreviewBridgeLatent, + "ImageSender": ImageSender, + "ImageReceiver": ImageReceiver, + "LatentSender": LatentSender, + "LatentReceiver": LatentReceiver, + "ImageMaskSwitch": ImageMaskSwitch, + "LatentSwitch": GeneralSwitch, + "SEGSSwitch": GeneralSwitch, + "ImpactSwitch": GeneralSwitch, + "ImpactInversedSwitch": GeneralInversedSwitch, + + "ImpactWildcardProcessor": ImpactWildcardProcessor, + "ImpactWildcardEncode": ImpactWildcardEncode, + + "SEGSUpscaler": SEGSUpscaler, + "SEGSUpscalerPipe": SEGSUpscalerPipe, + "SEGSDetailer": SEGSDetailer, + "SEGSPaste": SEGSPaste, + "SEGSPreview": SEGSPreview, + "SEGSPreviewCNet": SEGSPreviewCNet, + "SEGSToImageList": SEGSToImageList, + "ImpactSEGSToMaskList": SEGSToMaskList, + "ImpactSEGSToMaskBatch": SEGSToMaskBatch, + "ImpactSEGSConcat": SEGSConcat, + "ImpactSEGSPicker": SEGSPicker, + "ImpactMakeTileSEGS": MakeTileSEGS, + + "SEGSDetailerForAnimateDiff": SEGSDetailerForAnimateDiff, + + "ImpactKSamplerBasicPipe": KSamplerBasicPipe, + "ImpactKSamplerAdvancedBasicPipe": KSamplerAdvancedBasicPipe, + + "ReencodeLatent": ReencodeLatent, + "ReencodeLatentPipe": ReencodeLatentPipe, + + "ImpactImageBatchToImageList": ImageBatchToImageList, + "ImpactMakeImageList": MakeImageList, + "ImpactMakeImageBatch": MakeImageBatch, + + "RegionalSampler": RegionalSampler, + "RegionalSamplerAdvanced": RegionalSamplerAdvanced, + "CombineRegionalPrompts": CombineRegionalPrompts, + "RegionalPrompt": RegionalPrompt, + + "ImpactCombineConditionings": CombineConditionings, + "ImpactConcatConditionings": ConcatConditionings, + + "ImpactSEGSLabelAssign": SEGSLabelAssign, + "ImpactSEGSLabelFilter": SEGSLabelFilter, + "ImpactSEGSRangeFilter": SEGSRangeFilter, + "ImpactSEGSOrderedFilter": SEGSOrderedFilter, + + "ImpactCompare": ImpactCompare, + "ImpactConditionalBranch": ImpactConditionalBranch, + "ImpactConditionalBranchSelMode": ImpactConditionalBranchSelMode, + "ImpactIfNone": ImpactIfNone, + "ImpactConvertDataType": ImpactConvertDataType, + "ImpactLogicalOperators": ImpactLogicalOperators, + "ImpactInt": ImpactInt, + "ImpactFloat": ImpactFloat, + "ImpactValueSender": ImpactValueSender, + "ImpactValueReceiver": ImpactValueReceiver, + "ImpactImageInfo": ImpactImageInfo, + "ImpactLatentInfo": ImpactLatentInfo, + "ImpactMinMax": ImpactMinMax, + "ImpactNeg": ImpactNeg, + "ImpactConditionalStopIteration": ImpactConditionalStopIteration, + "ImpactStringSelector": StringSelector, + "StringListToString": StringListToString, + "WildcardPromptFromString": WildcardPromptFromString, + + "RemoveNoiseMask": RemoveNoiseMask, + + "ImpactLogger": ImpactLogger, + 
"ImpactDummyInput": ImpactDummyInput, + + "ImpactQueueTrigger": ImpactQueueTrigger, + "ImpactQueueTriggerCountdown": ImpactQueueTriggerCountdown, + "ImpactSetWidgetValue": ImpactSetWidgetValue, + "ImpactNodeSetMuteState": ImpactNodeSetMuteState, + "ImpactControlBridge": ImpactControlBridge, + "ImpactIsNotEmptySEGS": ImpactNotEmptySEGS, + "ImpactSleep": ImpactSleep, + "ImpactRemoteBoolean": ImpactRemoteBoolean, + "ImpactRemoteInt": ImpactRemoteInt, + + "ImpactHFTransformersClassifierProvider": HF_TransformersClassifierProvider, + "ImpactSEGSClassify": SEGS_Classify, + + "ImpactSchedulerAdapter": ImpactSchedulerAdapter, + "GITSSchedulerFuncProvider": GITSSchedulerFuncProvider +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + "SAMLoader": "SAMLoader (Impact)", + + "BboxDetectorSEGS": "BBOX Detector (SEGS)", + "SegmDetectorSEGS": "SEGM Detector (SEGS)", + "ONNXDetectorSEGS": "ONNX Detector (SEGS/legacy) - use BBOXDetector", + "ImpactSimpleDetectorSEGS_for_AD": "Simple Detector for AnimateDiff (SEGS)", + "ImpactSimpleDetectorSEGS": "Simple Detector (SEGS)", + "ImpactSimpleDetectorSEGSPipe": "Simple Detector (SEGS/pipe)", + "ImpactControlNetApplySEGS": "ControlNetApply (SEGS)", + "ImpactControlNetApplyAdvancedSEGS": "ControlNetApplyAdvanced (SEGS)", + "ImpactIPAdapterApplySEGS": "IPAdapterApply (SEGS)", + + "BboxDetectorCombined_v2": "BBOX Detector (combined)", + "SegmDetectorCombined_v2": "SEGM Detector (combined)", + "SegsToCombinedMask": "SEGS to MASK (combined)", + "MediaPipeFaceMeshToSEGS": "MediaPipe FaceMesh to SEGS", + "MaskToSEGS": "MASK to SEGS", + "MaskToSEGS_for_AnimateDiff": "MASK to SEGS for AnimateDiff", + "BitwiseAndMaskForEach": "Pixelwise(SEGS & SEGS)", + "SubtractMaskForEach": "Pixelwise(SEGS - SEGS)", + "ImpactSegsAndMask": "Pixelwise(SEGS & MASK)", + "ImpactSegsAndMaskForEach": "Pixelwise(SEGS & MASKS ForEach)", + "BitwiseAndMask": "Pixelwise(MASK & MASK)", + "SubtractMask": "Pixelwise(MASK - MASK)", + "AddMask": "Pixelwise(MASK + MASK)", + "DetailerForEach": "Detailer (SEGS)", + "DetailerForEachPipe": "Detailer (SEGS/pipe)", + "DetailerForEachDebug": "DetailerDebug (SEGS)", + "DetailerForEachDebugPipe": "DetailerDebug (SEGS/pipe)", + "SEGSDetailerForAnimateDiff": "SEGSDetailer For AnimateDiff (SEGS/pipe)", + "DetailerForEachPipeForAnimateDiff": "Detailer For AnimateDiff (SEGS/pipe)", + "SEGSUpscaler": "Upscaler (SEGS)", + "SEGSUpscalerPipe": "Upscaler (SEGS/pipe)", + + "SAMDetectorCombined": "SAMDetector (combined)", + "SAMDetectorSegmented": "SAMDetector (segmented)", + "FaceDetailerPipe": "FaceDetailer (pipe)", + "MaskDetailerPipe": "MaskDetailer (pipe)", + + "FromDetailerPipeSDXL": "FromDetailer (SDXL/pipe)", + "BasicPipeToDetailerPipeSDXL": "BasicPipe -> DetailerPipe (SDXL)", + "EditDetailerPipeSDXL": "Edit DetailerPipe (SDXL)", + + "BasicPipeToDetailerPipe": "BasicPipe -> DetailerPipe", + "DetailerPipeToBasicPipe": "DetailerPipe -> BasicPipe", + "EditBasicPipe": "Edit BasicPipe", + "EditDetailerPipe": "Edit DetailerPipe", + + "LatentPixelScale": "Latent Scale (on Pixel Space)", + "IterativeLatentUpscale": "Iterative Upscale (Latent/on Pixel Space)", + "IterativeImageUpscale": "Iterative Upscale (Image)", + + "TwoSamplersForMaskUpscalerProvider": "TwoSamplersForMask Upscaler Provider", + "TwoSamplersForMaskUpscalerProviderPipe": "TwoSamplersForMask Upscaler Provider (pipe)", + + "ReencodeLatent": "Reencode Latent", + "ReencodeLatentPipe": "Reencode Latent (pipe)", + + "ImpactKSamplerBasicPipe": "KSampler (pipe)", + "ImpactKSamplerAdvancedBasicPipe": "KSampler (Advanced/pipe)", + 
"ImpactSEGSLabelAssign": "SEGS Assign (label)", + "ImpactSEGSLabelFilter": "SEGS Filter (label)", + "ImpactSEGSRangeFilter": "SEGS Filter (range)", + "ImpactSEGSOrderedFilter": "SEGS Filter (ordered)", + "ImpactSEGSConcat": "SEGS Concat", + "ImpactSEGSToMaskList": "SEGS to Mask List", + "ImpactSEGSToMaskBatch": "SEGS to Mask Batch", + "ImpactSEGSPicker": "Picker (SEGS)", + "ImpactMakeTileSEGS": "Make Tile SEGS", + + "ImpactDecomposeSEGS": "Decompose (SEGS)", + "ImpactAssembleSEGS": "Assemble (SEGS)", + "ImpactFrom_SEG_ELT": "From SEG_ELT", + "ImpactEdit_SEG_ELT": "Edit SEG_ELT", + "ImpactFrom_SEG_ELT_bbox": "From SEG_ELT bbox", + "ImpactFrom_SEG_ELT_crop_region": "From SEG_ELT crop_region", + "ImpactDilate_Mask_SEG_ELT": "Dilate Mask (SEG_ELT)", + "ImpactScaleBy_BBOX_SEG_ELT": "ScaleBy BBOX (SEG_ELT)", + "ImpactCount_Elts_in_SEGS": "Count Elts in SEGS", + "ImpactDilateMask": "Dilate Mask", + "ImpactGaussianBlurMask": "Gaussian Blur Mask", + "ImpactDilateMaskInSEGS": "Dilate Mask (SEGS)", + "ImpactGaussianBlurMaskInSEGS": "Gaussian Blur Mask (SEGS)", + + "PreviewBridge": "Preview Bridge (Image)", + "PreviewBridgeLatent": "Preview Bridge (Latent)", + "ImageSender": "Image Sender", + "ImageReceiver": "Image Receiver", + "ImageMaskSwitch": "Switch (images, mask)", + "ImpactSwitch": "Switch (Any)", + "ImpactInversedSwitch": "Inversed Switch (Any)", + + "MasksToMaskList": "Masks to Mask List", + "MaskListToMaskBatch": "Mask List to Masks", + "ImpactImageBatchToImageList": "Image batch to Image List", + "ImageListToImageBatch": "Image List to Image Batch", + "ImpactMakeImageList": "Make Image List", + "ImpactMakeImageBatch": "Make Image Batch", + "ImpactStringSelector": "String Selector", + "StringListToString": "String List to String", + "WildcardPromptFromString": "Wildcard Prompt from String", + "ImpactIsNotEmptySEGS": "SEGS isn't Empty", + "SetDefaultImageForSEGS": "Set Default Image for SEGS", + "RemoveImageFromSEGS": "Remove Image from SEGS", + + "RemoveNoiseMask": "Remove Noise Mask", + + "ImpactCombineConditionings": "Combine Conditionings", + "ImpactConcatConditionings": "Concat Conditionings", + + "ImpactQueueTrigger": "Queue Trigger", + "ImpactQueueTriggerCountdown": "Queue Trigger (Countdown)", + "ImpactSetWidgetValue": "Set Widget Value", + "ImpactNodeSetMuteState": "Set Mute State", + "ImpactControlBridge": "Control Bridge", + "ImpactSleep": "Sleep", + "ImpactRemoteBoolean": "Remote Boolean (on prompt)", + "ImpactRemoteInt": "Remote Int (on prompt)", + + "ImpactHFTransformersClassifierProvider": "HF Transformers Classifier Provider", + "ImpactSEGSClassify": "SEGS Classify", + + "LatentSwitch": "Switch (latent/legacy)", + "SEGSSwitch": "Switch (SEGS/legacy)", + + "SEGSPreviewCNet": "SEGSPreview (CNET Image)", + + "ImpactSchedulerAdapter": "Impact Scheduler Adapter", + "GITSSchedulerFuncProvider": "GITSScheduler Func Provider", + "ImpactNegativeConditioningPlaceholder": "Negative Cond Placeholder" +} + +if not impact.config.get_config()['mmdet_skip']: + from impact.mmdet_nodes import * + import impact.legacy_nodes + NODE_CLASS_MAPPINGS.update({ + "MMDetDetectorProvider": MMDetDetectorProvider, + "MMDetLoader": impact.legacy_nodes.MMDetLoader, + "MaskPainter": impact.legacy_nodes.MaskPainter, + "SegsMaskCombine": impact.legacy_nodes.SegsMaskCombine, + "BboxDetectorForEach": impact.legacy_nodes.BboxDetectorForEach, + "SegmDetectorForEach": impact.legacy_nodes.SegmDetectorForEach, + "BboxDetectorCombined": impact.legacy_nodes.BboxDetectorCombined, + "SegmDetectorCombined": 
impact.legacy_nodes.SegmDetectorCombined,
+    })
+
+    NODE_DISPLAY_NAME_MAPPINGS.update({
+        "MaskPainter": "MaskPainter (Deprecated)",
+        "MMDetLoader": "MMDetLoader (Legacy)",
+        "SegsMaskCombine": "SegsMaskCombine (Legacy)",
+        "BboxDetectorForEach": "BboxDetectorForEach (Legacy)",
+        "SegmDetectorForEach": "SegmDetectorForEach (Legacy)",
+        "BboxDetectorCombined": "BboxDetectorCombined (Legacy)",
+        "SegmDetectorCombined": "SegmDetectorCombined (Legacy)",
+    })
+
+try:
+    import impact.subpack_nodes
+
+    NODE_CLASS_MAPPINGS.update(impact.subpack_nodes.NODE_CLASS_MAPPINGS)
+    NODE_DISPLAY_NAME_MAPPINGS.update(impact.subpack_nodes.NODE_DISPLAY_NAME_MAPPINGS)
+except Exception as e:
+    print("### ComfyUI-Impact-Pack: (IMPORT FAILED) Subpack\n")
+    print("   The module at the `custom_nodes/ComfyUI-Impact-Pack/impact_subpack` path appears to be incomplete.")
+    print("   It is recommended to delete this path and restart ComfyUI.")
+    print("   If the issue persists, please report it to https://github.com/ltdrdata/ComfyUI-Impact-Pack/issues.")
+    print("\n---------------------------------")
+    traceback.print_exc()
+    print("---------------------------------\n")
+
+# NOTE: Inject directly into EXTENSION_WEB_DIRS instead of using WEB_DIRECTORY.
+#       The js path is registered under the fixed name `ComfyUI-Impact-Pack` rather than
+#       under the directory name, so it remains available for external use.
+# WEB_DIRECTORY = "js"  -- deprecated method
+nodes.EXTENSION_WEB_DIRS["ComfyUI-Impact-Pack"] = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'js')
+
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
+
+
+try:
+    import cm_global
+    cm_global.register_extension('ComfyUI-Impact-Pack',
+                                 {'version': config.version_code,
+                                  'name': 'Impact Pack',
+                                  'nodes': set(NODE_CLASS_MAPPINGS.keys()),
+                                  'description': 'This extension provides inpainting functionality based on the detector and detailer, along with convenient workflow features like wildcards and logic nodes.', })
+except:
+    pass
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8192613b32240da4c3cc0eb24bf218d742cda881
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fb9d3228fd5d9a35b059a6b5756a43c1d1daca7
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/disable.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/disable.py
new file mode 100644
index 0000000000000000000000000000000000000000..900d77e81ebe1e6633a113f06d4f2005a9184c1f
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/disable.py
@@ -0,0 +1,38 @@
+import os
+import sys
+import time
+import platform
+import shutil
+import subprocess
+
+comfy_path = '../..'
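+# NOTE: this script assumes it is executed from within
+# custom_nodes/ComfyUI-Impact-Pack, so '../..' resolves to the ComfyUI root.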
+
+# Remove `path`, retrying a few times to tolerate transient file locks;
+# on Windows, clear read-only attributes first so shutil.rmtree can succeed.
+def rmtree(path):
+    retry_count = 3
+
+    while True:
+        try:
+            retry_count -= 1
+
+            if platform.system() == "Windows":
+                subprocess.check_call(['attrib', '-R', path + '\\*', '/S'])
+
+            shutil.rmtree(path)
+
+            return True
+
+        except Exception as ex:
+            print(f"ex: {ex}")
+            time.sleep(3)
+
+            if retry_count < 0:
+                raise ex
+
+            print(f"Uninstall retry({retry_count})")
+
+js_dest_path = os.path.join(comfy_path, "web", "extensions", "impact-pack")
+
+if os.path.exists(js_dest_path):
+    rmtree(js_dest_path)
+
+
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini
new file mode 100644
index 0000000000000000000000000000000000000000..7e7380d4e3297cd16ba17b6594ea4f40bae31a15
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini
@@ -0,0 +1,8 @@
+[default]
+dependency_version = 22
+mmdet_skip = True
+sam_editor_cpu = True
+sam_editor_model = sam_vit_b_01ec64.pth
+custom_wildcards = C:\Users\ilya9\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\custom_wildcards
+disable_gpu_opencv = True
+
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ada1a8176b23b97e70d89300c3f06c3c471bec2c
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/why-affero-gpl.html>.
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f605b1f61b5f53f12f0f02c16297ace09ef38d9
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md
@@ -0,0 +1,18 @@
+# ComfyUI-Impact-Subpack
+This extension complements the Impact Pack, offering features that are not deemed suitable for inclusion by default in the ComfyUI Impact Pack.
+
+The nodes in this repository cannot be used standalone and depend on [ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-Impact-Pack).
+
+## Nodes
+* UltralyticsDetectorProvider - This node provides an object detection detector based on Ultralytics.
+  * By using this Detector Provider, you can replace the existing mmdet-based detector.
+
+
+## Credits
+
+ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI.
+
+Bing-su/[adetailer](https://github.com/Bing-su/adetailer/) - This repository provides an object detection model and features based on Ultralytics.
+
+huggingface/Bingsu/[adetailer](https://huggingface.co/Bingsu/adetailer/tree/main) - This repository offers various models based on Ultralytics.
+* You can download other models supported by the UltralyticsDetectorProvider from here.
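To show how the provider described in this README is typically consumed, here is a minimal sketch (not part of this diff): it assumes a working ComfyUI environment with the Impact Pack installed and a `face_yolov8m.pt` checkpoint under `models/ultralytics/bbox`, and it drives the node class directly rather than through a workflow. The class and method names match the code later in this diff.

```python
import torch
from impact.subpack_nodes import UltralyticsDetectorProvider  # defined later in this diff

# Obtain the two detector objects the node outputs
# (bbox-only models get a placeholder SEGM_DETECTOR).
bbox_detector, segm_detector = UltralyticsDetectorProvider().doit("bbox/face_yolov8m.pt")

# ComfyUI IMAGE tensors are (batch, height, width, channels) with values in 0..1.
image = torch.rand(1, 512, 512, 3)

# Returns SEGS: ((height, width), [SEG, ...]) -- see subcore.py below.
segs = bbox_detector.detect(image, threshold=0.5, dilation=4, crop_factor=3.0, drop_size=10)
```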
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bafa775f9b4394717a34ab599cfe65d65fe91c49
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9989611269a32a8bdcd0106f56f17331f6d2c757
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py
new file mode 100644
index 0000000000000000000000000000000000000000..679856bf17778723ed0e52d6c56a64ca1573ca6b
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py
@@ -0,0 +1,234 @@
+from pathlib import Path
+from PIL import Image
+
+import impact.core as core
+import cv2
+import numpy as np
+from torchvision.transforms.functional import to_pil_image
+import torch
+
+try:
+    from ultralytics import YOLO
+except Exception as e:
+    print(e)
+    print(f"\n!!!!!\n\n[ComfyUI-Impact-Subpack] If this error occurs, please check the following link:\n\thttps://github.com/ltdrdata/ComfyUI-Impact-Pack/blob/Main/troubleshooting/TROUBLESHOOTING.md\n\n!!!!!\n")
+    raise e
+
+
+def load_yolo(model_path: str):
+    try:
+        return YOLO(model_path)
+    except ModuleNotFoundError:
+        # https://github.com/ultralytics/ultralytics/issues/3856
+        # Loading a bundled model first registers the modules that custom
+        # checkpoints reference; the requested model can then be loaded.
+        YOLO("yolov8n.pt")
+        return YOLO(model_path)
+
+
+def inference_bbox(
+    model,
+    image: Image.Image,
+    confidence: float = 0.3,
+    device: str = "",
+):
+    pred = model(image, conf=confidence, device=device)
+
+    bboxes = pred[0].boxes.xyxy.cpu().numpy()
+    if bboxes.shape[0] == 0:
+        return [[], [], [], []]
+
+    cv2_image = np.array(image)
+    if len(cv2_image.shape) == 3:
+        cv2_image = cv2_image[:, :, ::-1].copy()  # Convert RGB to BGR for cv2 processing
+    else:
+        # Grayscale input: convert to a 3-channel image for consistency
+        cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_GRAY2BGR)
+    cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY)
+
+    # A bbox model yields no pixel masks, so build a filled-rectangle mask per detection
+    segms = []
+    for x0, y0, x1, y1 in bboxes:
+        cv2_mask = np.zeros(cv2_gray.shape, np.uint8)
+        cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1)
+        cv2_mask_bool = cv2_mask.astype(bool)
+        segms.append(cv2_mask_bool)
+
+    # Four index-aligned lists: labels, bboxes, masks, confidences
+    results = [[], [], [], []]
+    for i in range(len(bboxes)):
+        results[0].append(pred[0].names[int(pred[0].boxes[i].cls.item())])
+        results[1].append(bboxes[i])
+        results[2].append(segms[i])
+        results[3].append(pred[0].boxes[i].conf.cpu().numpy())
+
+    return results
+
+
+def inference_segm(
+    model,
+    image: Image.Image,
+    confidence: float = 0.3,
+    device: str = "",
+):
+    pred = model(image, conf=confidence, device=device)
+
+    bboxes = pred[0].boxes.xyxy.cpu().numpy()
+    if bboxes.shape[0] == 0:
+        # NOTE: pred[0].masks.data is None when there are no detections
+        return [[], [], [], []]
+
+    segms = pred[0].masks.data.cpu().numpy()
+
+    # The returned masks may be letterboxed to a different aspect ratio than the
+    # original image; compute the padding gap so it can be cropped away before
+    # the masks are rescaled to the original resolution.
+    h_segms = segms.shape[1]
+    w_segms = segms.shape[2]
+    h_orig = image.size[1]
+    w_orig = image.size[0]
+    ratio_segms = h_segms / w_segms
+    ratio_orig = h_orig / w_orig
+
+    if ratio_segms == ratio_orig:
+        h_gap = 0
+        w_gap = 0
+    elif ratio_segms > ratio_orig:
+        h_gap = int((ratio_segms - ratio_orig) * h_segms)
+        w_gap = 0
+    else:
+        h_gap = 0
+        ratio_segms = w_segms / h_segms
+        ratio_orig = w_orig / h_orig
+        w_gap = int((ratio_segms - ratio_orig) * w_segms)
+
+    results = [[], [], [], []]
+    for i in range(len(bboxes)):
+        results[0].append(pred[0].names[int(pred[0].boxes[i].cls.item())])
+        results[1].append(bboxes[i])
+
+        mask = torch.from_numpy(segms[i])
+        mask = mask[h_gap:mask.shape[0] - h_gap, w_gap:mask.shape[1] - w_gap]
+
+        scaled_mask = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0), size=(image.size[1], image.size[0]),
+                                                      mode='bilinear', align_corners=False)
+        scaled_mask = scaled_mask.squeeze().squeeze()
+
+        results[2].append(scaled_mask.numpy())
+        results[3].append(pred[0].boxes[i].conf.cpu().numpy())
+
+    return results
+
+
+class UltraBBoxDetector:
+    bbox_model = None
+
+    def __init__(self, bbox_model):
+        self.bbox_model = bbox_model
+
+    def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None):
+        drop_size = max(drop_size, 1)
+        detected_results = inference_bbox(self.bbox_model, core.tensor2pil(image), threshold)
+        segmasks = core.create_segmasks(detected_results)
+
+        if dilation > 0:
+            segmasks = core.dilate_masks(segmasks, dilation)
+
+        items = []
+        h = image.shape[1]
+        w = image.shape[2]
+
+        for x, label in zip(segmasks, detected_results[0]):
+            item_bbox = x[0]
+            item_mask = x[1]
+
+            x1, y1, x2, y2 = item_bbox
+
+            if x2 - x1 > drop_size and y2 - y1 > drop_size:  # minimum dimension must be (2,2) to avoid squeeze issue
+                crop_region = core.make_crop_region(w, h, item_bbox, crop_factor)
+
+                if detailer_hook is not None:
+                    crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region)
+
+                cropped_image = core.crop_image(image, crop_region)
+                cropped_mask = core.crop_ndarray2(item_mask, crop_region)
+                confidence = x[2]
+                # bbox_size = (item_bbox[2]-item_bbox[0], item_bbox[3]-item_bbox[1])  # (w,h)
+
+                item = core.SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, label, None)
+
+                items.append(item)
+
+        shape = image.shape[1], image.shape[2]
+        segs = shape, items
+
+        if detailer_hook is not None and hasattr(detailer_hook, "post_detection"):
+            segs = detailer_hook.post_detection(segs)
+
+        return segs
+
+    def detect_combined(self, image, threshold, dilation):
+        detected_results = inference_bbox(self.bbox_model, core.tensor2pil(image), threshold)
+        segmasks = core.create_segmasks(detected_results)
+        if dilation > 0:
+            segmasks = core.dilate_masks(segmasks, dilation)
+
+        return core.combine_masks(segmasks)
+
+    def setAux(self, x):
+        pass
+
+
+class UltraSegmDetector:
+    bbox_model = None
+
+    def __init__(self, bbox_model):
+        self.bbox_model = bbox_model
+
+    def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None):
+        drop_size = max(drop_size, 1)
+        detected_results = inference_segm(self.bbox_model, core.tensor2pil(image), threshold)
+        segmasks = core.create_segmasks(detected_results)
+
+        if dilation > 0:
+            segmasks = core.dilate_masks(segmasks, dilation)
+
+        items = []
+        h = image.shape[1]
+        w = image.shape[2]
+
+        for x, label in zip(segmasks, detected_results[0]):
+            item_bbox = x[0]
+            item_mask = x[1]
+
+            x1, y1, x2, y2 = item_bbox
+
+            if x2 - x1 > drop_size and y2 - y1 > drop_size:  # minimum dimension must be (2,2) to avoid squeeze issue
+                crop_region = core.make_crop_region(w, h, item_bbox, crop_factor)
+
+                if detailer_hook is not None:
+                    crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region)
+
+                cropped_image = core.crop_image(image, crop_region)
+                cropped_mask = core.crop_ndarray2(item_mask, crop_region)
+                confidence = x[2]
+                # bbox_size = (item_bbox[2]-item_bbox[0], item_bbox[3]-item_bbox[1])  # (w,h)
+
+                item = core.SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, label, None)
+
+                items.append(item)
+
+        shape = image.shape[1], image.shape[2]
+        segs = shape, items
+
+        if detailer_hook is not None and hasattr(detailer_hook, "post_detection"):
+            segs = detailer_hook.post_detection(segs)
+
+        return segs
+
+    def detect_combined(self, image, threshold, dilation):
+        detected_results = inference_segm(self.bbox_model, core.tensor2pil(image), threshold)
+        segmasks = core.create_segmasks(detected_results)
+        if dilation > 0:
+            segmasks = core.dilate_masks(segmasks, dilation)
+
+        return core.combine_masks(segmasks)
+
+    def setAux(self, x):
+        pass
\ No newline at end of file
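To make the return contract of `inference_bbox` above concrete, here is a small sketch (hypothetical model and image paths; assumes `ultralytics` is installed): the four returned lists are index-aligned, one entry per detection.

```python
from PIL import Image
from impact.subcore import load_yolo, inference_bbox

model = load_yolo("models/ultralytics/bbox/face_yolov8m.pt")  # hypothetical path
labels, bboxes, masks, confs = inference_bbox(model, Image.open("portrait.png"), confidence=0.3)

for label, bbox, conf in zip(labels, bboxes, confs):
    x1, y1, x2, y2 = bbox  # xyxy pixel coordinates
    print(f"{label}: ({x1:.0f},{y1:.0f})-({x2:.0f},{y2:.0f}) @ {float(conf):.2f}")
```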
+add_folder_path_and_extensions("ultralytics", [os.path.join(model_path, "ultralytics")], folder_paths.supported_pt_extensions) + + +class UltralyticsDetectorProvider: + @classmethod + def INPUT_TYPES(s): + bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("ultralytics_bbox")] + segms = ["segm/"+x for x in folder_paths.get_filename_list("ultralytics_segm")] + return {"required": {"model_name": (bboxs + segms, )}} + RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR") + FUNCTION = "doit" + + CATEGORY = "ImpactPack" + + def doit(self, model_name): + model_path = folder_paths.get_full_path("ultralytics", model_name) + model = subcore.load_yolo(model_path) + + if model_name.startswith("bbox"): + return subcore.UltraBBoxDetector(model), core.NO_SEGM_DETECTOR() + else: + return subcore.UltraBBoxDetector(model), subcore.UltraSegmDetector(model) + + +NODE_CLASS_MAPPINGS = { + "UltralyticsDetectorProvider": UltralyticsDetectorProvider +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + +} diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py new file mode 100644 index 0000000000000000000000000000000000000000..19756b2018816490d64851b57acff4078a5d0665 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py @@ -0,0 +1,39 @@ +import os +import sys +from torchvision.datasets.utils import download_url + +subpack_path = os.path.join(os.path.dirname(__file__)) + +comfy_path = os.environ.get('COMFYUI_PATH') +if comfy_path is None: + print(f"\n[bold yellow]WARN: The `COMFYUI_PATH` environment variable is not set. Assuming `{os.path.dirname(__file__)}/../../../` as the ComfyUI path.[/bold yellow]", file=sys.stderr) + comfy_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +sys.path.append(comfy_path) + +model_path = os.environ.get('COMFYUI_MODEL_PATH') +if model_path is None: + print(f"\n[bold yellow]WARN: The `COMFYUI_MODEL_PATH` environment variable is not set. 
Assuming `{model_path}` as the ComfyUI path.[/bold yellow]", file=sys.stderr) + model_path = os.path.abspath(os.path.join(comfy_path, 'models')) + +ultralytics_bbox_path = os.path.join(model_path, "ultralytics", "bbox") +ultralytics_segm_path = os.path.join(model_path, "ultralytics", "segm") + +if not os.path.exists(os.path.join(subpack_path, '..', '..', 'skip_download_model')): + if not os.path.exists(ultralytics_bbox_path): + os.makedirs(ultralytics_bbox_path) + + if not os.path.exists(ultralytics_segm_path): + os.makedirs(ultralytics_segm_path) + + if not os.path.exists(os.path.join(ultralytics_bbox_path, "face_yolov8m.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt", + ultralytics_bbox_path) + + if not os.path.exists(os.path.join(ultralytics_bbox_path, "hand_yolov8s.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt", + ultralytics_bbox_path) + + if not os.path.exists(os.path.join(ultralytics_segm_path, "person_yolov8m-seg.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8m-seg.pt", + ultralytics_segm_path) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f3f1f6c1dad4c0a354d3e0fbc24be86362c68116 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt @@ -0,0 +1,2 @@ +matplotlib==3.8 +ultralytics!=8.2.5 diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/install.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/install.py new file mode 100644 index 0000000000000000000000000000000000000000..20b56b22f72b298d8e3f921314e82c90a9960718 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/install.py @@ -0,0 +1,299 @@ +import os +import shutil +import sys +import subprocess +import threading +import locale +import traceback +import re + + +if sys.argv[0] == 'install.py': + sys.path.append('.') # for portable version + + +impact_path = os.path.join(os.path.dirname(__file__), "modules") +old_subpack_path = os.path.join(os.path.dirname(__file__), "subpack") +subpack_path = os.path.join(os.path.dirname(__file__), "impact_subpack") +subpack_repo = "https://github.com/ltdrdata/ComfyUI-Impact-Subpack" + + +comfy_path = os.environ.get('COMFYUI_PATH') +if comfy_path is None: + print(f"\n[bold yellow]WARN: The `COMFYUI_PATH` environment variable is not set. Assuming `{os.path.dirname(__file__)}/../../` as the ComfyUI path.[/bold yellow]", file=sys.stderr) + comfy_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) + +model_path = os.environ.get('COMFYUI_MODEL_PATH') +if model_path is None: + try: + import folder_paths + model_path = folder_paths.models_dir + except: + pass + + if model_path is None: + model_path = os.path.abspath(os.path.join(comfy_path, 'models')) + print(f"\n[bold yellow]WARN: The `COMFYUI_MODEL_PATH` environment variable is not set. 
Assuming `{model_path}` as the ComfyUI path.[/bold yellow]", file=sys.stderr) + + +sys.path.append(impact_path) +sys.path.append(comfy_path) + + +# --- +def handle_stream(stream, is_stdout): + stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') + + for msg in stream: + if is_stdout: + print(msg, end="", file=sys.stdout) + else: + print(msg, end="", file=sys.stderr) + + +def process_wrap(cmd_str, cwd=None, handler=None, env=None): + print(f"[Impact Pack] EXECUTE: {cmd_str} in '{cwd}'") + process = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, text=True, bufsize=1) + + if handler is None: + handler = handle_stream + + stdout_thread = threading.Thread(target=handler, args=(process.stdout, True)) + stderr_thread = threading.Thread(target=handler, args=(process.stderr, False)) + + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return process.wait() +# --- + + +pip_list = None + + +def get_installed_packages(): + global pip_list + + if pip_list is None: + try: + result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True) + pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()]) + except subprocess.CalledProcessError as e: + print(f"[ComfyUI-Manager] Failed to retrieve the information of installed pip packages.") + return set() + + return pip_list + + +def is_installed(name): + name = name.strip() + pattern = r'([^<>!=]+)([<>!=]=?)' + match = re.search(pattern, name) + + if match: + name = match.group(1) + + result = name.lower() in get_installed_packages() + return result + + +def is_requirements_installed(file_path): + print(f"req_path: {file_path}") + if os.path.exists(file_path): + with open(file_path, 'r') as file: + lines = file.readlines() + for line in lines: + if not is_installed(line): + return False + + return True + +try: + import platform + from torchvision.datasets.utils import download_url + import impact.config + + + print("### ComfyUI-Impact-Pack: Check dependencies") + + if "python_embeded" in sys.executable or "python_embedded" in sys.executable: + pip_install = [sys.executable, '-s', '-m', 'pip', 'install'] + pip_upgrade = [sys.executable, '-s', '-m', 'pip', 'install', '-U'] + mim_install = [sys.executable, '-s', '-m', 'mim', 'install'] + else: + pip_install = [sys.executable, '-m', 'pip', 'install'] + pip_upgrade = [sys.executable, '-m', 'pip', 'install', '-U'] + mim_install = [sys.executable, '-m', 'mim', 'install'] + + + def ensure_subpack(): + import git + if os.path.exists(subpack_path): + try: + repo = git.Repo(subpack_path) + repo.remotes.origin.pull() + except: + traceback.print_exc() + if platform.system() == 'Windows': + print(f"[ComfyUI-Impact-Pack] Please turn off ComfyUI and remove '{subpack_path}' and restart ComfyUI.") + else: + shutil.rmtree(subpack_path) + git.Repo.clone_from(subpack_repo, subpack_path) + else: + git.Repo.clone_from(subpack_repo, subpack_path) + + if os.path.exists(old_subpack_path): + shutil.rmtree(old_subpack_path) + + + def ensure_pip_packages_first(): + subpack_req = os.path.join(subpack_path, "requirements.txt") + if os.path.exists(subpack_req) and not is_requirements_installed(subpack_req): + process_wrap(pip_install + ['-r', 'requirements.txt'], cwd=subpack_path) + + if not impact.config.get_config()['mmdet_skip']: + process_wrap(pip_install + ['openmim']) + + try: + import pycocotools + except Exception: + if platform.system() not in ["Windows"] 
or platform.machine() not in ["AMD64", "x86_64"]: + print(f"Your system is {platform.system()}; !! You need to install 'libpython3-dev' for this step. !!") + + process_wrap(pip_install + ['pycocotools']) + else: + pycocotools = { + (3, 8): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp38-cp38-win_amd64.whl", + (3, 9): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp39-cp39-win_amd64.whl", + (3, 10): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp310-cp310-win_amd64.whl", + (3, 11): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp311-cp311-win_amd64.whl", + } + + version = sys.version_info[:2] + url = pycocotools[version] + process_wrap(pip_install + [url]) + + + def ensure_pip_packages_last(): + my_path = os.path.dirname(__file__) + requirements_path = os.path.join(my_path, "requirements.txt") + + if not is_requirements_installed(requirements_path): + process_wrap(pip_install + ['-r', requirements_path]) + + # fallback + try: + import segment_anything + from skimage.measure import label, regionprops + import piexif + except Exception: + process_wrap(pip_install + ['-r', requirements_path]) + + # !! cv2 importing test must be very last !! + try: + from cv2 import setNumThreads + except Exception: + try: + is_open_cv_installed = False + + # upgrade if opencv is installed already + if is_installed('opencv-python'): + process_wrap(pip_upgrade + ['opencv-python']) + is_open_cv_installed = True + + if is_installed('opencv-python-headless'): + process_wrap(pip_upgrade + ['opencv-python-headless']) + is_open_cv_installed = True + + if is_installed('opencv-contrib-python'): + process_wrap(pip_upgrade + ['opencv-contrib-python']) + is_open_cv_installed = True + + if is_installed('opencv-contrib-python-headless'): + process_wrap(pip_upgrade + ['opencv-contrib-python-headless']) + is_open_cv_installed = True + + # if opencv is not installed install `opencv-python-headless` + if not is_open_cv_installed: + process_wrap(pip_install + ['opencv-python-headless']) + except: + print(f"[ERROR] ComfyUI-Impact-Pack: failed to install 'opencv-python'. Please, install manually.") + + def ensure_mmdet_package(): + try: + import mmcv + import mmdet + from mmdet.evaluation import get_classes + except Exception: + process_wrap(pip_install + ['opendatalab==0.0.9']) + process_wrap(pip_install + ['-U', 'openmim']) + process_wrap(mim_install + ['mmcv>=2.0.0rc4, <2.1.0']) + process_wrap(mim_install + ['mmdet==3.0.0']) + process_wrap(mim_install + ['mmengine==0.7.4']) + + + def install(): + subpack_install_script = os.path.join(subpack_path, "install.py") + + print(f"### ComfyUI-Impact-Pack: Updating subpack") + try: + import git + except Exception: + if not is_installed('GitPython'): + process_wrap(pip_install + ['GitPython']) + + ensure_subpack() # The installation of the subpack must take place before ensure_pip. cv2 triggers a permission error. 
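For comparison only: `is_installed()` earlier in this file answers "is this package present?" by shelling out to `pip list` once and string-matching names. The stdlib `importlib.metadata` can do the same in-process; this sketch is an alternative illustration, not what install.py actually does, and it still has to strip version specifiers such as `ultralytics!=8.2.5` down to a bare name first:

```python
# Alternative sketch using the standard library instead of `pip list`.
from importlib.metadata import PackageNotFoundError, version


def is_installed_alt(spec):
    # Reduce "name==1.2" / "name!=1.2" / "name>=1.2" etc. to "name".
    name = spec.strip()
    for sep in ("!=", "==", ">=", "<=", ">", "<"):
        if sep in name:
            name = name.split(sep, 1)[0]
            break
    try:
        version(name)
        return True
    except PackageNotFoundError:
        return False


print(is_installed_alt("matplotlib==3.8"))
print(is_installed_alt("surely-not-a-real-package"))
```

One caveat the sketch skips: pip normalizes project names (hyphens, underscores, case), so a robust check would canonicalize both sides before comparing.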
+ + new_env = os.environ.copy() + new_env["COMFYUI_PATH"] = comfy_path + new_env["COMFYUI_MODEL_PATH"] = model_path + + if os.path.exists(subpack_install_script): + process_wrap([sys.executable, 'install.py'], cwd=subpack_path, env=new_env) + if not is_requirements_installed(os.path.join(subpack_path, 'requirements.txt')): + process_wrap(pip_install + ['-r', 'requirements.txt'], cwd=subpack_path) + else: + print(f"### ComfyUI-Impact-Pack: (Install Failed) Subpack\nFile not found: `{subpack_install_script}`") + + ensure_pip_packages_first() + + if not impact.config.get_config()['mmdet_skip']: + ensure_mmdet_package() + + ensure_pip_packages_last() + + # Download model + print("### ComfyUI-Impact-Pack: Check basic models") + bbox_path = os.path.join(model_path, "mmdets", "bbox") + sam_path = os.path.join(model_path, "sams") + onnx_path = os.path.join(model_path, "onnx") + + if not os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'skip_download_model')): + if not os.path.exists(bbox_path): + os.makedirs(bbox_path) + + if not impact.config.get_config()['mmdet_skip']: + if not os.path.exists(os.path.join(bbox_path, "mmdet_anime-face_yolov3.pth")): + download_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path) + + if not os.path.exists(os.path.join(bbox_path, "mmdet_anime-face_yolov3.py")): + download_url("https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_anime-face_yolov3.py", bbox_path) + + if not os.path.exists(os.path.join(sam_path, "sam_vit_b_01ec64.pth")): + download_url("https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", sam_path) + + if not os.path.exists(onnx_path): + print(f"### ComfyUI-Impact-Pack: onnx model directory created ({onnx_path})") + os.mkdir(onnx_path) + + impact.config.write_config() + + + install() + +except Exception as e: + print("[ERROR] ComfyUI-Impact-Pack: Dependency installation has failed. 
Please install manually.") + traceback.print_exc() diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js new file mode 100644 index 0000000000000000000000000000000000000000..86982f134de4b3962b5e8287920964e33eb84f8d --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js @@ -0,0 +1,35 @@ +import { ComfyApp, app } from "../../scripts/app.js"; + +let conflict_check = undefined; + +app.registerExtension({ + name: "Comfy.impact.comboBoolMigration", + + nodeCreated(node, app) { + for(let i in node.widgets) { + let widget = node.widgets[i]; + + if(conflict_check == undefined) { + conflict_check = !!app.extensions.find((ext) => ext.name === "Comfy.comboBoolMigration"); + } + + if(conflict_check) + return; + + if(widget.type == "toggle") { + let value = widget.value; + + var v = Object.getOwnPropertyDescriptor(widget, 'value'); + if(!v) { + Object.defineProperty(widget, "value", { + set: (value) => { + delete widget.value; + widget.value = value == true || value == widget.options.on; + }, + get: () => { return value; } + }); + } + } + } + } +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/common.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/common.js new file mode 100644 index 0000000000000000000000000000000000000000..aa5fbd5b7d96a8b60b058522a47bc9df5d7163e0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/common.js @@ -0,0 +1,95 @@ +import { api } from "../../scripts/api.js"; +import { app } from "../../scripts/app.js"; + +let original_show = app.ui.dialog.show; + +function dialog_show_wrapper(html) { + if (typeof html === "string") { + if(html.includes("IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE")) { + return; + } + + this.textElement.innerHTML = html; + } else { + this.textElement.replaceChildren(html); + } + this.element.style.display = "flex"; +} + +app.ui.dialog.show = dialog_show_wrapper; + + +function nodeFeedbackHandler(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const w = node.widgets.find((w) => event.detail.widget_name === w.name); + if(w) { + w.value = event.detail.value; + } + } +} + +api.addEventListener("impact-node-feedback", nodeFeedbackHandler); + + +function setMuteState(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + if(event.detail.is_active) + node.mode = 0; + else + node.mode = 2; + } +} + +api.addEventListener("impact-node-mute-state", setMuteState); + + +async function bridgeContinue(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const mutes = new Set(event.detail.mutes); + const actives = new Set(event.detail.actives); + const bypasses = new Set(event.detail.bypasses); + + for(let i in app.graph._nodes_by_id) { + let this_node = app.graph._nodes_by_id[i]; + if(mutes.has(i)) { + this_node.mode = 2; + } + else if(actives.has(i)) { + this_node.mode = 0; + } + else if(bypasses.has(i)) { + this_node.mode = 4; + } + } + + await app.queuePrompt(0, 1); + } +} + +api.addEventListener("impact-bridge-continue", bridgeContinue); + + +function addQueue(event) { + app.queuePrompt(0, 1); +} + +api.addEventListener("impact-add-queue", addQueue); + + +function refreshPreview(event) { + let node_id = event.detail.node_id; + let item = event.detail.item; + let img = new Image(); + img.src = 
`/view?filename=${item.filename}&subfolder=${item.subfolder}&type=${item.type}&no-cache=${Date.now()}`; + let node = app.graph._nodes_by_id[node_id]; + if(node) + node.imgs = [img]; +} + +api.addEventListener("impact-preview", refreshPreview); diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js new file mode 100644 index 0000000000000000000000000000000000000000..678d60ad5766b80b7deeb538e0c63bc9e6384d19 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js @@ -0,0 +1,229 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; + +function load_image(str) { + let base64String = canvas.toDataURL('image/png'); + let img = new Image(); + img.src = base64String; +} + +function getFileItem(baseType, path) { + try { + let pathType = baseType; + + if (path.endsWith("[output]")) { + pathType = "output"; + path = path.slice(0, -9); + } else if (path.endsWith("[input]")) { + pathType = "input"; + path = path.slice(0, -8); + } else if (path.endsWith("[temp]")) { + pathType = "temp"; + path = path.slice(0, -7); + } + + const subfolder = path.substring(0, path.lastIndexOf('/')); + const filename = path.substring(path.lastIndexOf('/') + 1); + + return { + filename: filename, + subfolder: subfolder, + type: pathType + }; + } + catch(exception) { + return null; + } +} + +async function loadImageFromUrl(image, node_id, v, need_to_load) { + let item = getFileItem('temp', v); + + if(item) { + let params = `?node_id=${node_id}&filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = await api.fetchApi('/impact/set/pb_id_image'+params, { cache: "no-store" }); + if(res.status == 200) { + let pb_id = await res.text(); + if(need_to_load) {; + image.src = api.apiURL(`/view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`); + } + return pb_id; + } + else { + return `$${node_id}-0`; + } + } + else { + return `$${node_id}-0`; + } +} + +async function loadImageFromId(image, v) { + let res = await api.fetchApi('/impact/get/pb_id_image?id='+v, { cache: "no-store" }); + if(res.status == 200) { + let item = await res.json(); + image.src = api.apiURL(`/view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`); + return true; + } + + return false; +} + +app.registerExtension({ + name: "Comfy.Impact.img", + + nodeCreated(node, app) { + if(node.comfyClass == "PreviewBridge" || node.comfyClass == "PreviewBridgeLatent") { + let w = node.widgets.find(obj => obj.name === 'image'); + node._imgs = [new Image()]; + node.imageIndex = 0; + + Object.defineProperty(w, 'value', { + async set(v) { + if(w._lock) + return; + + const stackTrace = new Error().stack; + if(stackTrace.includes('presetText.js')) + return; + + var image = new Image(); + if(v && v.constructor == String && v.startsWith('$')) { + // from node feedback + let need_to_load = node._imgs[0].src == ''; + if(await loadImageFromId(image, v, need_to_load)) { + w._value = v; + if(node._imgs[0].src == '') { + node._imgs = [image]; + } + } + else { + w._value = `$${node.id}-0`; + } + } + else { + // from clipspace + w._lock = true; + w._value = await loadImageFromUrl(image, node.id, v, false); + w._lock = false; + } + }, + get() { + if(w._value == undefined) { + w._value = `$${node.id}-0`; + } + return w._value; + } + }); + + Object.defineProperty(node, 'imgs', { + set(v) { + const stackTrace = new Error().stack; + if(v && v.length == 0) + 
return; + else if(stackTrace.includes('pasteFromClipspace')) { + let sp = new URLSearchParams(v[0].src.split("?")[1]); + let str = ""; + if(sp.get('subfolder')) { + str += sp.get('subfolder') + '/'; + } + str += `${sp.get("filename")} [${sp.get("type")}]`; + + w.value = str; + } + + node._imgs = v; + }, + get() { + return node._imgs; + } + }); + } + + if(node.comfyClass == "ImageReceiver") { + let path_widget = node.widgets.find(obj => obj.name === 'image'); + let w = node.widgets.find(obj => obj.name === 'image_data'); + let stw_widget = node.widgets.find(obj => obj.name === 'save_to_workflow'); + w._value = ""; + + Object.defineProperty(w, 'value', { + set(v) { + if(v != '[IMAGE DATA]') + w._value = v; + }, + get() { + const stackTrace = new Error().stack; + if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && stackTrace.includes('app.js')) { + return "[IMAGE DATA]"; + } + else { + if(stw_widget.value) + return w._value; + else + return ""; + } + } + }); + + let set_img_act = (v) => { + node._img = v; + var canvas = document.createElement('canvas'); + canvas.width = v[0].width; + canvas.height = v[0].height; + + var context = canvas.getContext('2d'); + context.drawImage(v[0], 0, 0, v[0].width, v[0].height); + + var base64Image = canvas.toDataURL('image/png'); + w.value = base64Image; + }; + + Object.defineProperty(node, 'imgs', { + set(v) { + if (v && !v[0].complete) { + let orig_onload = v[0].onload; + v[0].onload = function(v2) { + if(orig_onload) + orig_onload(); + set_img_act(v); + }; + } + else { + set_img_act(v); + } + }, + get() { + if(this._img == undefined && w.value != '') { + this._img = [new Image()]; + if(stw_widget.value && w.value != '[IMAGE DATA]') + this._img[0].src = w.value; + } + else if(this._img == undefined && path_widget.value) { + let image = new Image(); + image.src = path_widget.value; + + try { + let item = getFileItem('temp', path_widget.value); + let params = `?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = api.fetchApi('/view/validate'+params, { cache: "no-store" }).then(response => response); + if(res.status == 200) { + image.src = api.apiURL('/view'+params); + } + + this._img = [new Image()]; // placeholder + image.onload = function(v) { + set_img_act([image]); + }; + } + catch { + + } + } + return this._img; + } + }); + } + } +}) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js new file mode 100644 index 0000000000000000000000000000000000000000..0c7979a72c3ad15ec7a6a663851048b0be8a5079 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js @@ -0,0 +1,815 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +let wildcards_list = []; +async function load_wildcards() { + let res = await api.fetchApi('/impact/wildcards/list'); + let data = await res.json(); + wildcards_list = data.data; +} + +load_wildcards(); + +export function get_wildcards_list() { + return wildcards_list; +} + +// temporary implementation (copying from https://github.com/pythongosssss/ComfyUI-WD14-Tagger) +// I think this should be included into master!! 
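A note on the `load_wildcards()` helper near the top of this file: it fills `wildcards_list` from the `/impact/wildcards/list` endpoint, which responds with a JSON object of the form `{"data": [...]}`. A rough Python client equivalent of that fetch follows; the host and port are an assumption (ComfyUI's usual local default), not something this file specifies:

```python
# Client-side sketch of GET /impact/wildcards/list, assuming a local server.
import json
from urllib.request import urlopen

COMFY_URL = "http://127.0.0.1:8188"  # assumed default ComfyUI address

with urlopen(f"{COMFY_URL}/impact/wildcards/list") as resp:
    wildcards = json.load(resp)["data"]

print(f"{len(wildcards)} wildcards available")
```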
+class ImpactProgressBadge { + constructor() { + if (!window.__progress_badge__) { + window.__progress_badge__ = Symbol("__impact_progress_badge__"); + } + this.symbol = window.__progress_badge__; + } + + getState(node) { + return node[this.symbol] || {}; + } + + setState(node, state) { + node[this.symbol] = state; + app.canvas.setDirty(true); + } + + addStatusHandler(nodeType) { + if (nodeType[this.symbol]?.statusTagHandler) { + return; + } + if (!nodeType[this.symbol]) { + nodeType[this.symbol] = {}; + } + nodeType[this.symbol] = { + statusTagHandler: true, + }; + + api.addEventListener("impact/update_status", ({ detail }) => { + let { node, progress, text } = detail; + const n = app.graph.getNodeById(+(node || app.runningNodeId)); + if (!n) return; + const state = this.getState(n); + state.status = Object.assign(state.status || {}, { progress: text ? progress : null, text: text || null }); + this.setState(n, state); + }); + + const self = this; + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + const state = self.getState(this); + if (!state?.status?.text) { + return r; + } + + const { fgColor, bgColor, text, progress, progressColor } = { ...state.status }; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor || "dodgerblue"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + if (progress) { + ctx.fillStyle = progressColor || "green"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, (sz.width + 12) * progress, 20, 5); + ctx.fill(); + } + + ctx.fillStyle = fgColor || "#fff"; + ctx.fillText(text, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + return r; + }; + } +} + +const input_tracking = {}; +const input_dirty = {}; +const output_tracking = {}; + +function progressExecuteHandler(event) { + if(event.detail.output.aux){ + const id = event.detail.node; + if(input_tracking.hasOwnProperty(id)) { + if(input_tracking.hasOwnProperty(id) && input_tracking[id][0] != event.detail.output.aux[0]) { + input_dirty[id] = true; + } + else{ + + } + } + + input_tracking[id] = event.detail.output.aux; + } +} + +function imgSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'ImageReceiver') { + let is_linked = false; + + if(nodes[i].widgets[1].type == 'converted-widget') { + for(let j in nodes[i].inputs) { + let input = nodes[i].inputs[j]; + if(input.name === 'link_id') { + if(input.link) { + let src_node = app.graph._nodes_by_id[app.graph.links[input.link].origin_id]; + if(src_node.type == 'ImpactInt' || src_node.type == 'PrimitiveNode') { + is_linked = true; + } + } + break; + } + } + } + else if(nodes[i].widgets[1].value == event.detail.link_id) { + is_linked = true; + } + + if(is_linked) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.onload = (event) => { + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + app.canvas.setDirty(true); + }; + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + } + } 
+ } + } +} + + +function latentSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'LatentReceiver') { + if(nodes[i].widgets[1].value == event.detail.link_id) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + } + } + } + } +} + + +function valueSendHandler(event) { + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'ImpactValueReceiver') { + if(nodes[i].widgets[2].value == event.detail.link_id) { + nodes[i].widgets[1].value = event.detail.value; + + let typ = typeof event.detail.value; + if(typ == 'string') { + nodes[i].widgets[0].value = "STRING"; + } + else if(typ == "boolean") { + nodes[i].widgets[0].value = "BOOLEAN"; + } + else if(typ != "number") { + nodes[i].widgets[0].value = typeof event.detail.value; + } + else if(Number.isInteger(event.detail.value)) { + nodes[i].widgets[0].value = "INT"; + } + else { + nodes[i].widgets[0].value = "FLOAT"; + } + } + } + } +} + + +const impactProgressBadge = new ImpactProgressBadge(); + +api.addEventListener("stop-iteration", () => { + document.getElementById("autoQueueCheckbox").checked = false; +}); +api.addEventListener("value-send", valueSendHandler); +api.addEventListener("img-send", imgSendHandler); +api.addEventListener("latent-send", latentSendHandler); +api.addEventListener("executed", progressExecuteHandler); + +app.registerExtension({ + name: "Comfy.Impack", + loadedGraphNode(node, app) { + if (node.comfyClass == "MaskPainter") { + input_dirty[node.id + ""] = true; + } + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name == "IterativeLatentUpscale" || nodeData.name == "IterativeImageUpscale" + || nodeData.name == "RegionalSampler"|| nodeData.name == "RegionalSamplerAdvanced") { + impactProgressBadge.addStatusHandler(nodeType); + } + + if(nodeData.name == "ImpactControlBridge") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*') + return; + + // assign type + let slot_type = '*'; + + if(type == 2) { + slot_type = link_info.type; + } + else { + const node = app.graph.getNodeById(link_info.origin_id); + slot_type = node.outputs[link_info.origin_slot].type; + } + + this.inputs[0].type = slot_type; + this.outputs[0].type = slot_type; + this.outputs[0].label = slot_type; + } + } + + if(nodeData.name == "ImpactConditionalBranch" || nodeData.name == "ImpactConditionalBranchSelMode") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*') + return; + + if(index >= 2) + return; + + // assign type + let slot_type = '*'; + + if(type == 2) { + slot_type = link_info.type; + } + else { + const node = app.graph.getNodeById(link_info.origin_id); + slot_type = node.outputs[link_info.origin_slot].type; + } + + this.inputs[0].type = slot_type; + this.inputs[1].type = 
slot_type; + this.outputs[0].type = slot_type; + this.outputs[0].label = slot_type; + } + } + + if(nodeData.name == "ImpactCompare") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*' || type == 2) + return; + + // assign type + const node = app.graph.getNodeById(link_info.origin_id); + let slot_type = node.outputs[link_info.origin_slot].type; + + this.inputs[0].type = slot_type; + this.inputs[1].type = slot_type; + } + } + + if(nodeData.name === 'ImpactInversedSwitch') { + nodeData.output = ['*']; + nodeData.output_is_list = [false]; + nodeData.output_name = ['output1']; + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info) + return; + + if(type == 2) { + // connect output + if(connected){ + if(app.graph._nodes_by_id[link_info.target_id].type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].name = link_info.type; + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = link_info.type; + } + } + } + } + } + else { + if(app.graph._nodes_by_id[link_info.origin_id].type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node.outputs[link_info.origin_slot].type; + + if(origin_type == '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].name = origin_type; + } + + return; + } + + if (!connected && this.outputs.length > 1) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData')) { + if(this.outputs[link_info.origin_slot].links.length == 0) + this.removeOutput(link_info.origin_slot); + } + } + + let slot_i = 1; + for (let i = 0; i < this.outputs.length; i++) { + this.outputs[i].name = `output${slot_i}` + slot_i++; + } + + let last_slot = this.outputs[this.outputs.length - 1]; + if (last_slot.slot_index == link_info.origin_slot) { + this.addOutput(`output${slot_i}`, this.outputs[0].type); + } + + let select_slot = this.inputs.find(x => x.name == "select"); + if(this.widgets) { + this.widgets[0].options.max = select_slot?this.outputs.length-1:this.outputs.length; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + if(this.widgets[0].options.max > 0 && this.widgets[0].value == 0) + this.widgets[0].value = 1; + } + } + } + + if (nodeData.name === 'ImpactMakeImageList' || nodeData.name === 'ImpactMakeImageBatch' || + nodeData.name === 'CombineRegionalPrompts' || + nodeData.name === 'ImpactCombineConditionings' || nodeData.name === 'ImpactConcatConditionings' || + nodeData.name === 'ImpactSEGSConcat' || + nodeData.name === 'ImpactSwitch' || nodeData.name === 
'LatentSwitch' || nodeData.name == 'SEGSSwitch') { + var input_name = "input"; + + switch(nodeData.name) { + case 'ImpactMakeImageList': + case 'ImpactMakeImageBatch': + input_name = "image"; + break; + + case 'ImpactSEGSConcat': + input_name = "segs"; + break; + + case 'CombineRegionalPrompts': + input_name = "regional_prompts"; + break; + + case 'ImpactCombineConditionings': + case 'ImpactConcatConditionings': + input_name = "conditioning"; + break; + + case 'LatentSwitch': + input_name = "input"; + break; + + case 'SEGSSwitch': + input_name = "input"; + break; + + case 'ImpactSwitch': + input_name = "input"; + } + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info) + return; + + if(type == 2) { + // connect output + if(connected && index == 0){ + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.target_id]?.type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].label = link_info.type; + this.outputs[0].name = link_info.type; + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = link_info.type; + } + } + } + } + + return; + } + else { + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.origin_id].type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[index].name == 'select' || this.inputs[index].name == 'sel_mode') + return; + + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node.outputs[link_info.origin_slot].type; + + if(origin_type == '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].label = origin_type; + this.outputs[0].name = origin_type; + } + } + + let select_slot = this.inputs.find(x => x.name == "select"); + let mode_slot = this.inputs.find(x => x.name == "sel_mode"); + + let converted_count = 0; + converted_count += select_slot?1:0; + converted_count += mode_slot?1:0; + + if (!connected && (this.inputs.length > 1+converted_count)) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData') && + this.inputs[index].name != 'select') { + this.removeInput(index); + } + } + + let slot_i = 1; + for (let i = 0; i < this.inputs.length; i++) { + let input_i = this.inputs[i]; + if(input_i.name != 'select'&& input_i.name != 'sel_mode') { + input_i.name = `${input_name}${slot_i}` + slot_i++; + } + } + + let last_slot = this.inputs[this.inputs.length - 1]; + if ( + (last_slot.name == 'select' && last_slot.name != 'sel_mode' && this.inputs[this.inputs.length - 2].link != undefined) + || (last_slot.name != 'select' && last_slot.name != 'sel_mode' && last_slot.link != undefined)) { + this.addInput(`${input_name}${slot_i}`, 
this.outputs[0].type); + } + + if(this.widgets) { + this.widgets[0].options.max = select_slot?this.inputs.length-1:this.inputs.length; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + if(this.widgets[0].options.max > 0 && this.widgets[0].value == 0) + this.widgets[0].value = 1; + } + } + } + }, + + nodeCreated(node, app) { + if(node.comfyClass == "MaskPainter") { + node.addWidget("button", "Edit mask", null, () => { + ComfyApp.copyToClipspace(node); + ComfyApp.clipspace_return_node = node; + ComfyApp.open_maskeditor(); + }); + } + + switch(node.comfyClass) { + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + case "EditDetailerPipe": + case "FaceDetailer": + case "DetailerForEach": + case "DetailerForEachDebug": + case "DetailerForEachPipe": + case "DetailerForEachDebugPipe": + { + for(let i in node.widgets) { + let widget = node.widgets[i]; + if(widget.type === "customtext") { + widget.dynamicPrompts = false; + widget.inputEl.placeholder = "wildcard spec: if kept empty, this option will be ignored"; + widget.serializeValue = () => { + return node.widgets[i].value; + }; + } + } + } + break; + } + + if(node.comfyClass == "ImpactSEGSLabelFilter" || node.comfyClass == "SEGSLabelFilterDetailerHookProvider") { + Object.defineProperty(node.widgets[0], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(node.widgets[1].value.trim() != "" && !node.widgets[1].value.trim().endsWith(",")) + node.widgets[1].value += ", " + + node.widgets[1].value += value; + node.widgets_values[1] = node.widgets[1].value; + } + + node._value = value; + }, + get: () => { + return node._value; + } + }); + } + + if(node.comfyClass == "UltralyticsDetectorProvider") { + let model_name_widget = node.widgets.find((w) => w.name === "model_name"); + let orig_draw = node.onDrawForeground; + node.onDrawForeground = function (ctx) { + const r = orig_draw?.apply?.(this, arguments); + + let is_seg = model_name_widget.value?.startsWith('segm/') || model_name_widget.value?.includes('-seg'); + if(!is_seg) { + var slot_pos = new Float32Array(2); + var pos = node.getConnectionPos(false, 1, slot_pos); + + pos[0] -= node.pos[0] - 10; + pos[1] -= node.pos[1]; + + ctx.beginPath(); + ctx.strokeStyle = "red"; + ctx.lineWidth = 4; + ctx.moveTo(pos[0] - 5, pos[1] - 5); + ctx.lineTo(pos[0] + 5, pos[1] + 5); + ctx.moveTo(pos[0] + 5, pos[1] - 5); + ctx.lineTo(pos[0] - 5, pos[1] + 5); + ctx.stroke(); + } + } + } + + if( + node.comfyClass == "ImpactWildcardEncode" || node.comfyClass == "ImpactWildcardProcessor" + || node.comfyClass == "ToDetailerPipe" || node.comfyClass == "ToDetailerPipeSDXL" + || node.comfyClass == "EditDetailerPipe" || node.comfyClass == "EditDetailerPipeSDXL" + || node.comfyClass == "BasicPipeToDetailerPipe" || node.comfyClass == "BasicPipeToDetailerPipeSDXL") { + node._value = "Select the LoRA to add to the text"; + node._wvalue = "Select the Wildcard to add to the text"; + + var tbox_id = 0; + var combo_id = 3; + var has_lora = true; + + switch(node.comfyClass){ + case "ImpactWildcardEncode": + tbox_id = 0; + combo_id = 3; + break; + + case "ImpactWildcardProcessor": + tbox_id = 0; + combo_id = 4; + has_lora = false; + break; + + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "EditDetailerPipe": + case "EditDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + tbox_id = 0; + combo_id = 1; 
+ break; + } + + Object.defineProperty(node.widgets[combo_id+1], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Select the Wildcard to add to the text") { + if(node.widgets[tbox_id].value != '') + node.widgets[tbox_id].value += ', ' + + node.widgets[tbox_id].value += value; + } + } + }, + get: () => { return "Select the Wildcard to add to the text"; } + }); + + Object.defineProperty(node.widgets[combo_id+1].options, "values", { + set: (x) => {}, + get: () => { + return wildcards_list; + } + }); + + if(has_lora) { + Object.defineProperty(node.widgets[combo_id], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Select the LoRA to add to the text") { + let lora_name = value; + if (lora_name.endsWith('.safetensors')) { + lora_name = lora_name.slice(0, -12); + } + + node.widgets[tbox_id].value += ``; + if(node.widgets_values) { + node.widgets_values[tbox_id] = node.widgets[tbox_id].value; + } + } + } + + node._value = value; + }, + + get: () => { return "Select the LoRA to add to the text"; } + }); + } + + // Preventing validation errors from occurring in any situation. + if(has_lora) { + node.widgets[combo_id].serializeValue = () => { return "Select the LoRA to add to the text"; } + } + node.widgets[combo_id+1].serializeValue = () => { return "Select the Wildcard to add to the text"; } + } + + if(node.comfyClass == "ImpactWildcardProcessor" || node.comfyClass == "ImpactWildcardEncode") { + node.widgets[0].inputEl.placeholder = "Wildcard Prompt (User input)"; + node.widgets[1].inputEl.placeholder = "Populated Prompt (Will be generated automatically)"; + node.widgets[1].inputEl.disabled = true; + + const populated_text_widget = node.widgets.find((w) => w.name == 'populated_text'); + const mode_widget = node.widgets.find((w) => w.name == 'mode'); + + // mode combo + Object.defineProperty(mode_widget, "value", { + set: (value) => { + node._mode_value = value == true || value == "Populate"; + populated_text_widget.inputEl.disabled = value == true || value == "Populate"; + }, + get: () => { + if(node._mode_value != undefined) + return node._mode_value; + else + return true; + } + }); + } + + if (node.comfyClass == "MaskPainter") { + node.widgets[0].value = '#placeholder'; + + Object.defineProperty(node, "images", { + set: function(value) { + node._images = value; + }, + get: function() { + const id = node.id+""; + if(node.widgets[0].value != '#placeholder') { + var need_invalidate = false; + + if(input_dirty.hasOwnProperty(id) && input_dirty[id]) { + node.widgets[0].value = {...input_tracking[id][1]}; + input_dirty[id] = false; + need_invalidate = true + this._images = app.nodeOutputs[id].images; + } + + let filename = app.nodeOutputs[id]['aux'][1][0]['filename']; + let subfolder = app.nodeOutputs[id]['aux'][1][0]['subfolder']; + let type = app.nodeOutputs[id]['aux'][1][0]['type']; + + let item = + { + image_hash: app.nodeOutputs[id]['aux'][0], + forward_filename: app.nodeOutputs[id]['aux'][1][0]['filename'], + forward_subfolder: app.nodeOutputs[id]['aux'][1][0]['subfolder'], + forward_type: app.nodeOutputs[id]['aux'][1][0]['type'] + }; + + if(node._images) { + app.nodeOutputs[id].images = [{ + ...node._images[0], + ...item + }]; + + node.widgets[0].value = + { + ...node._images[0], + ...item + }; + } + else { + app.nodeOutputs[id].images = [{ + ...item + }]; + + node.widgets[0].value = + { + ...item + }; + } + + 
if(need_invalidate) { + Promise.all( + app.nodeOutputs[id].images.map((src) => { + return new Promise((r) => { + const img = new Image(); + img.onload = () => r(img); + img.onerror = () => r(null); + img.src = "/view?" + new URLSearchParams(src).toString(); + }); + }) + ).then((imgs) => { + this.imgs = imgs.filter(Boolean); + this.setSizeForImage?.(); + app.graph.setDirtyCanvas(true); + }); + + app.nodeOutputs[id].images[0] = { ...node.widgets[0].value }; + } + + return app.nodeOutputs[id].images; + } + else { + return node._images; + } + } + }); + } + } +}); diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js new file mode 100644 index 0000000000000000000000000000000000000000..e7bf6f297fc67d41c3e25e6541cd0df98b51fdd1 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js @@ -0,0 +1,637 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { ComfyApp } from "../../scripts/app.js"; +import { ClipspaceDialog } from "../../extensions/core/clipspace.js"; + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +// Helper function to convert a data URL to a Blob object +function dataURLToBlob(dataURL) { + const parts = dataURL.split(';base64,'); + const contentType = parts[0].split(':')[1]; + const byteString = atob(parts[1]); + const arrayBuffer = new ArrayBuffer(byteString.length); + const uint8Array = new Uint8Array(arrayBuffer); + for (let i = 0; i < byteString.length; i++) { + uint8Array[i] = byteString.charCodeAt(i); + } + return new Blob([arrayBuffer], { type: contentType }); +} + +function loadedImageToBlob(image) { + const canvas = document.createElement('canvas'); + + canvas.width = image.width; + canvas.height = image.height; + + const ctx = canvas.getContext('2d'); + + ctx.drawImage(image, 0, 0); + + const dataURL = canvas.toDataURL('image/png', 1); + const blob = dataURLToBlob(dataURL); + + return blob; +} + +async function uploadMask(filepath, formData) { + await api.fetchApi('/upload/mask', { + method: 'POST', + body: formData + }).then(response => {}).catch(error => { + console.error('Error:', error); + }); + + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = `view?filename=${filepath.filename}&type=${filepath.type}`; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; + + ClipspaceDialog.invalidatePreview(); +} + +class ImpactSamEditorDialog extends ComfyDialog { + static instance = null; + + static getInstance() { + if(!ImpactSamEditorDialog.instance) { + ImpactSamEditorDialog.instance = new ImpactSamEditorDialog(); + } + + return ImpactSamEditorDialog.instance; + } + + constructor() { + super(); + this.element = $el("div.comfy-modal", { parent: document.body }, + [ $el("div.comfy-modal-content", + [...this.createButtons()]), + ]); + } + + createButtons() { + return []; + } + + createButton(name, callback) { + var button = document.createElement("button"); + button.innerText = name; + button.addEventListener("click", callback); + return button; + } + + createLeftButton(name, callback) { + var button = 
this.createButton(name, callback); + button.style.cssFloat = "left"; + button.style.marginRight = "4px"; + return button; + } + + createRightButton(name, callback) { + var button = this.createButton(name, callback); + button.style.cssFloat = "right"; + button.style.marginLeft = "4px"; + return button; + } + + createLeftSlider(self, name, callback) { + const divElement = document.createElement('div'); + divElement.id = "sam-confidence-slider"; + divElement.style.cssFloat = "left"; + divElement.style.fontFamily = "sans-serif"; + divElement.style.marginRight = "4px"; + divElement.style.color = "var(--input-text)"; + divElement.style.backgroundColor = "var(--comfy-input-bg)"; + divElement.style.borderRadius = "8px"; + divElement.style.borderColor = "var(--border-color)"; + divElement.style.borderStyle = "solid"; + divElement.style.fontSize = "15px"; + divElement.style.height = "21px"; + divElement.style.padding = "1px 6px"; + divElement.style.display = "flex"; + divElement.style.position = "relative"; + divElement.style.top = "2px"; + self.confidence_slider_input = document.createElement('input'); + self.confidence_slider_input.setAttribute('type', 'range'); + self.confidence_slider_input.setAttribute('min', '0'); + self.confidence_slider_input.setAttribute('max', '100'); + self.confidence_slider_input.setAttribute('value', '70'); + const labelElement = document.createElement("label"); + labelElement.textContent = name; + + divElement.appendChild(labelElement); + divElement.appendChild(self.confidence_slider_input); + + self.confidence_slider_input.addEventListener("change", callback); + + return divElement; + } + + async detect_and_invalidate_mask_canvas(self) { + const mask_img = await self.detect(self); + + const canvas = self.maskCtx.canvas; + const ctx = self.maskCtx; + + ctx.clearRect(0, 0, canvas.width, canvas.height); + + await new Promise((resolve, reject) => { + self.mask_image = new Image(); + self.mask_image.onload = function() { + ctx.drawImage(self.mask_image, 0, 0, canvas.width, canvas.height); + resolve(); + }; + self.mask_image.onerror = reject; + self.mask_image.src = mask_img.src; + }); + } + + setlayout(imgCanvas, maskCanvas, pointsCanvas) { + const self = this; + + // If it is specified as relative, using it only as a hidden placeholder for padding is recommended + // to prevent anomalies where it exceeds a certain size and goes outside of the window. 
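One detail worth noting before the rest of the layout code: the editor stores every click prompt in original-image coordinates and rescales on the fly, both when drawing points (`invalidatePointsCanvas`) and when recording clicks (`handlePointerDown`), further down in this class. A tiny sketch of that round trip, with made-up sizes:

```python
# Round trip between original-image space and the scaled editor canvas.
IMAGE_W, IMAGE_H = 1024, 768    # original image (made-up size)
CANVAS_W, CANVAS_H = 512, 384   # on-screen canvas (made-up size)


def to_canvas(x, y):
    # image space -> canvas space, as when drawing stored points
    return x * CANVAS_W / IMAGE_W, y * CANVAS_H / IMAGE_H


def to_image(x, y):
    # canvas space -> image space, as when recording a click
    return x * IMAGE_W / CANVAS_W, y * IMAGE_H / CANVAS_H


cx, cy = to_canvas(500, 300)
assert to_image(cx, cy) == (500.0, 300.0)
```

Keeping the stored points resolution-independent is what lets the resize handler in `setImages()` redraw them at any canvas size.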
+ var placeholder = document.createElement("div"); + placeholder.style.position = "relative"; + placeholder.style.height = "50px"; + + var bottom_panel = document.createElement("div"); + bottom_panel.style.position = "absolute"; + bottom_panel.style.bottom = "0px"; + bottom_panel.style.left = "20px"; + bottom_panel.style.right = "20px"; + bottom_panel.style.height = "50px"; + + var brush = document.createElement("div"); + brush.id = "sam-brush"; + brush.style.backgroundColor = "blue"; + brush.style.outline = "2px solid pink"; + brush.style.borderRadius = "50%"; + brush.style.MozBorderRadius = "50%"; + brush.style.WebkitBorderRadius = "50%"; + brush.style.position = "absolute"; + brush.style.zIndex = 100; + brush.style.pointerEvents = "none"; + this.brush = brush; + this.element.appendChild(imgCanvas); + this.element.appendChild(maskCanvas); + this.element.appendChild(pointsCanvas); + this.element.appendChild(placeholder); // must below z-index than bottom_panel to avoid covering button + this.element.appendChild(bottom_panel); + document.body.appendChild(brush); + this.brush_size = 5; + + var confidence_slider = this.createLeftSlider(self, "Confidence", (event) => { + self.confidence = event.target.value; + }); + + var clearButton = this.createLeftButton("Clear", () => { + self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + + self.prompt_points = []; + + self.invalidatePointsCanvas(self); + }); + + var detectButton = this.createLeftButton("Detect", () => self.detect_and_invalidate_mask_canvas(self)); + + var cancelButton = this.createRightButton("Cancel", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.close(); + }); + + self.saveButton = this.createRightButton("Save", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.save(self); + }); + + var undoButton = this.createLeftButton("Undo", () => { + if(self.prompt_points.length > 0) { + self.prompt_points.pop(); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + self.invalidatePointsCanvas(self); + } + }); + + bottom_panel.appendChild(clearButton); + bottom_panel.appendChild(detectButton); + bottom_panel.appendChild(self.saveButton); + bottom_panel.appendChild(cancelButton); + bottom_panel.appendChild(confidence_slider); + bottom_panel.appendChild(undoButton); + + imgCanvas.style.position = "relative"; + imgCanvas.style.top = "200"; + imgCanvas.style.left = "0"; + + maskCanvas.style.position = "absolute"; + maskCanvas.style.opacity = 0.5; + pointsCanvas.style.position = "absolute"; + } + + show() { + this.mask_image = null; + self.prompt_points = []; + + this.message_box = $el("p", ["Please wait a moment while the SAM model and the image are being loaded."]); + this.element.appendChild(this.message_box); + + if(self.imgCtx) { + self.imgCtx.clearRect(0, 0, self.imageCanvas.width, self.imageCanvas.height); + } + + const target_image_path = ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src; + this.load_sam(target_image_path); + + if(!this.is_layout_created) { + // layout + const imgCanvas = document.createElement('canvas'); + const maskCanvas = document.createElement('canvas'); + const pointsCanvas = document.createElement('canvas'); + + 
imgCanvas.id = "imageCanvas"; + maskCanvas.id = "maskCanvas"; + pointsCanvas.id = "pointsCanvas"; + + this.setlayout(imgCanvas, maskCanvas, pointsCanvas); + + // prepare content + this.imgCanvas = imgCanvas; + this.maskCanvas = maskCanvas; + this.pointsCanvas = pointsCanvas; + this.maskCtx = maskCanvas.getContext('2d'); + this.pointsCtx = pointsCanvas.getContext('2d'); + + this.is_layout_created = true; + + // replacement of onClose hook since close is not real close + const self = this; + const observer = new MutationObserver(function(mutations) { + mutations.forEach(function(mutation) { + if (mutation.type === 'attributes' && mutation.attributeName === 'style') { + if(self.last_display_style && self.last_display_style != 'none' && self.element.style.display == 'none') { + ComfyApp.onClipspaceEditorClosed(); + } + + self.last_display_style = self.element.style.display; + } + }); + }); + + const config = { attributes: true }; + observer.observe(this.element, config); + } + + this.setImages(target_image_path, this.imgCanvas, this.pointsCanvas); + + if(ComfyApp.clipspace_return_node) { + this.saveButton.innerText = "Save to node"; + } + else { + this.saveButton.innerText = "Save"; + } + this.saveButton.disabled = true; + + this.element.style.display = "block"; + this.element.style.zIndex = 8888; // NOTE: alert dialog must be high priority. + } + + updateBrushPreview(self, event) { + event.preventDefault(); + + const centerX = event.pageX; + const centerY = event.pageY; + + const brush = self.brush; + + brush.style.width = self.brush_size * 2 + "px"; + brush.style.height = self.brush_size * 2 + "px"; + brush.style.left = (centerX - self.brush_size) + "px"; + brush.style.top = (centerY - self.brush_size) + "px"; + } + + setImages(target_image_path, imgCanvas, pointsCanvas) { + const imgCtx = imgCanvas.getContext('2d'); + const maskCtx = this.maskCtx; + const maskCanvas = this.maskCanvas; + + const self = this; + + // image load + const orig_image = new Image(); + window.addEventListener("resize", () => { + // repositioning + imgCanvas.width = window.innerWidth - 250; + imgCanvas.height = window.innerHeight - 200; + + // redraw image + let drawWidth = orig_image.width; + let drawHeight = orig_image.height; + + if (orig_image.width > imgCanvas.width) { + drawWidth = imgCanvas.width; + drawHeight = (drawWidth / orig_image.width) * orig_image.height; + } + + if (drawHeight > imgCanvas.height) { + drawHeight = imgCanvas.height; + drawWidth = (drawHeight / orig_image.height) * orig_image.width; + } + + imgCtx.drawImage(orig_image, 0, 0, drawWidth, drawHeight); + + // update mask + pointsCanvas.width = drawWidth; + pointsCanvas.height = drawHeight; + pointsCanvas.style.top = imgCanvas.offsetTop + "px"; + pointsCanvas.style.left = imgCanvas.offsetLeft + "px"; + + maskCanvas.width = drawWidth; + maskCanvas.height = drawHeight; + maskCanvas.style.top = imgCanvas.offsetTop + "px"; + maskCanvas.style.left = imgCanvas.offsetLeft + "px"; + + self.invalidateMaskCanvas(self); + self.invalidatePointsCanvas(self); + }); + + // original image load + orig_image.onload = () => self.onLoaded(self); + const rgb_url = new URL(target_image_path); + rgb_url.searchParams.delete('channel'); + rgb_url.searchParams.set('channel', 'rgb'); + orig_image.src = rgb_url; + self.image = orig_image; + } + + onLoaded(self) { + if(self.message_box) { + self.element.removeChild(self.message_box); + self.message_box = null; + } + + window.dispatchEvent(new Event('resize')); + + self.setEventHandler(pointsCanvas); + 
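The editor talks to the server twice: the `load_sam()` call above POSTs the image reference to `/sam/prepare`, and `detect()` (just below) POSTs the accumulated point prompts to `/sam/detect`, receiving a PNG mask as the raw response body. A hedged Python sketch of both requests follows; the host/port and the example filename are assumptions, the payload fields mirror the JavaScript, and a plain JSON content type is used even though the JavaScript labels the detect request `image/png` while still sending a JSON body:

```python
# Sketch of the two SAM editor endpoints, assuming a local ComfyUI server.
import json
from urllib.request import Request, urlopen

COMFY_URL = "http://127.0.0.1:8188"  # assumption


def post_json(path, payload):
    req = Request(
        COMFY_URL + path,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urlopen(req) as resp:
        return resp.read()


# 1) Ask the server to load SAM and embed the target image.
post_json("/sam/prepare", {
    "sam_model_name": "auto",
    "filename": "example.png",   # hypothetical image in ComfyUI's temp storage
    "type": "temp",
    "subfolder": "",
})

# 2) Send click prompts; the response body is the PNG mask itself.
png_mask = post_json("/sam/detect", {
    "positive_points": [[256, 256]],
    "negative_points": [],
    "threshold": 0.7,
})
with open("mask.png", "wb") as f:
    f.write(png_mask)
```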
+        self.saveButton.disabled = false;
+    }
+
+    setEventHandler(targetCanvas) {
+        targetCanvas.addEventListener("contextmenu", (event) => {
+            event.preventDefault();
+        });
+
+        const self = this;
+        targetCanvas.addEventListener('pointermove', (event) => this.updateBrushPreview(self, event));
+        targetCanvas.addEventListener('pointerdown', (event) => this.handlePointerDown(self, event));
+        targetCanvas.addEventListener('pointerover', (event) => { this.brush.style.display = "block"; });
+        targetCanvas.addEventListener('pointerleave', (event) => { this.brush.style.display = "none"; });
+        document.addEventListener('keydown', ImpactSamEditorDialog.handleKeyDown);
+    }
+
+    static handleKeyDown(event) {
+        const self = ImpactSamEditorDialog.instance;
+        if (event.key === '=') { // positive
+            self.brush.style.backgroundColor = "blue";
+            self.brush.style.outline = "2px solid pink";
+            self.is_positive_mode = true;
+        } else if (event.key === '-') { // negative
+            self.brush.style.backgroundColor = "red";
+            self.brush.style.outline = "2px solid skyblue";
+            self.is_positive_mode = false;
+        }
+    }
+
+    is_positive_mode = true;
+    prompt_points = [];
+    confidence = 70;
+
+    invalidatePointsCanvas(self) {
+        const ctx = self.pointsCtx;
+
+        for (const i in self.prompt_points) {
+            const [is_positive, x, y] = self.prompt_points[i];
+
+            const scaledX = x * ctx.canvas.width / self.image.width;
+            const scaledY = y * ctx.canvas.height / self.image.height;
+
+            if(is_positive)
+                ctx.fillStyle = "blue";
+            else
+                ctx.fillStyle = "red";
+            ctx.beginPath();
+            ctx.arc(scaledX, scaledY, 3, 0, 2 * Math.PI);
+            ctx.fill();
+        }
+    }
+
+    invalidateMaskCanvas(self) {
+        if(self.mask_image) {
+            self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height);
+            self.maskCtx.drawImage(self.mask_image, 0, 0, self.maskCanvas.width, self.maskCanvas.height);
+        }
+    }
+
+    async load_sam(url) {
+        const parsedUrl = new URL(url);
+        const searchParams = new URLSearchParams(parsedUrl.search);
+
+        const filename = searchParams.get("filename") || "";
+        const fileType = searchParams.get("type") || "";
+        const subfolder = searchParams.get("subfolder") || "";
+
+        const data = {
+            sam_model_name: "auto",
+            filename: filename,
+            type: fileType,
+            subfolder: subfolder
+        };
+
+        api.fetchApi('/sam/prepare', {
+            method: 'POST',
+            headers: { 'Content-Type': 'application/json' },
+            body: JSON.stringify(data)
+        });
+    }
+
+    async detect(self) {
+        const positive_points = [];
+        const negative_points = [];
+
+        for(const i in self.prompt_points) {
+            const [is_positive, x, y] = self.prompt_points[i];
+            const point = [x, y];
+            if(is_positive)
+                positive_points.push(point);
+            else
+                negative_points.push(point);
+        }
+
+        const data = {
+            positive_points: positive_points,
+            negative_points: negative_points,
+            threshold: self.confidence/100
+        };
+
+        const response = await api.fetchApi('/sam/detect', {
+            method: 'POST',
+            headers: { 'Content-Type': 'application/json' },
+            body: JSON.stringify(data)
+        });
+
+        const blob = await response.blob();
+        const url = URL.createObjectURL(blob);
+
+        return new Promise((resolve, reject) => {
+            const image = new Image();
+            image.onload = () => resolve(image);
+            image.onerror = reject;
+            image.src = url;
+        });
+    }
+
+    handlePointerDown(self, event) {
+        if ([0, 2, 5].includes(event.button)) {
+            event.preventDefault();
+            const maskRect = self.pointsCanvas.getBoundingClientRect();
+            const x = event.offsetX || event.targetTouches[0].clientX - maskRect.left;
+            const y = event.offsetY || event.targetTouches[0].clientY - maskRect.top;
+
+            const originalX = x * self.image.width / self.pointsCanvas.width;
+            const originalY = y * 
self.image.height / self.pointsCanvas.height; + + var point = null; + if (event.button == 0) { + // positive + point = [true, originalX, originalY]; + } else { + // negative + point = [false, originalX, originalY]; + } + + self.prompt_points.push(point); + + self.invalidatePointsCanvas(self); + } + } + + async save(self) { + if(!self.mask_image) { + this.close(); + return; + } + + const save_canvas = document.createElement('canvas'); + + const save_ctx = save_canvas.getContext('2d', {willReadFrequently:true}); + save_canvas.width = self.mask_image.width; + save_canvas.height = self.mask_image.height; + + save_ctx.drawImage(self.mask_image, 0, 0, save_canvas.width, save_canvas.height); + + const save_data = save_ctx.getImageData(0, 0, save_canvas.width, save_canvas.height); + + // refine mask image + for (let i = 0; i < save_data.data.length; i += 4) { + if(save_data.data[i]) { + save_data.data[i+3] = 0; + } + else { + save_data.data[i+3] = 255; + } + + save_data.data[i] = 0; + save_data.data[i+1] = 0; + save_data.data[i+2] = 0; + } + + save_ctx.globalCompositeOperation = 'source-over'; + save_ctx.putImageData(save_data, 0, 0); + + const formData = new FormData(); + const filename = "clipspace-mask-" + performance.now() + ".png"; + + const item = + { + "filename": filename, + "subfolder": "", + "type": "temp", + }; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[0] = item; + + if(ComfyApp.clipspace.widgets) { + const index = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image'); + + if(index >= 0) + ComfyApp.clipspace.widgets[index].value = `${filename} [temp]`; + } + + const dataURL = save_canvas.toDataURL(); + const blob = dataURLToBlob(dataURL); + + let original_url = new URL(this.image.src); + + const original_ref = { filename: original_url.searchParams.get('filename') }; + + let original_subfolder = original_url.searchParams.get("subfolder"); + if(original_subfolder) + original_ref.subfolder = original_subfolder; + + let original_type = original_url.searchParams.get("type"); + if(original_type) + original_ref.type = original_type; + + formData.append('image', blob, filename); + formData.append('original_ref', JSON.stringify(original_ref)); + formData.append('type', "temp"); + + await uploadMask(item, formData); + ComfyApp.onClipspaceEditorSave(); + this.close(); + } +} + +app.registerExtension({ + name: "Comfy.Impact.SAMEditor", + init(app) { + const callback = + function () { + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }; + + const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 + ClipspaceDialog.registerButton("Impact SAM Detector", context_predicate, callback); + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (Array.isArray(nodeData.output) && (nodeData.output.includes("MASK") || nodeData.output.includes("IMAGE"))) { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Open in SAM Detector", + callback: () => { + ComfyApp.copyToClipspace(this); + ComfyApp.clipspace_return_node = this; + + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }, + }); + }); + } + } +}); + diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js new file mode 100644 index 0000000000000000000000000000000000000000..16af55aa5af43e8ff523c3cfcdd8363f092e2d06 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js @@ -0,0 
+1,182 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +async function open_picker(node) { + const resp = await api.fetchApi(`/impact/segs/picker/count?id=${node.id}`); + const body = await resp.text(); + + let cnt = parseInt(body); + + var existingPicker = document.getElementById('impact-picker'); + if (existingPicker) { + existingPicker.parentNode.removeChild(existingPicker); + } + + var gallery = document.createElement('div'); + gallery.id = 'impact-picker'; + + gallery.style.position = "absolute"; + gallery.style.height = "80%"; + gallery.style.width = "80%"; + gallery.style.top = "10%"; + gallery.style.left = "10%"; + gallery.style.display = 'flex'; + gallery.style.flexWrap = 'wrap'; + gallery.style.maxHeight = '600px'; + gallery.style.overflow = 'auto'; + gallery.style.backgroundColor = 'rgba(0,0,0,0.3)'; + gallery.style.padding = '20px'; + gallery.draggable = false; + gallery.style.zIndex = 5000; + + var doneButton = document.createElement('button'); + doneButton.textContent = 'Done'; + doneButton.style.padding = '10px 10px'; + doneButton.style.border = 'none'; + doneButton.style.borderRadius = '5px'; + doneButton.style.fontFamily = 'Arial, sans-serif'; + doneButton.style.fontSize = '16px'; + doneButton.style.fontWeight = 'bold'; + doneButton.style.color = '#fff'; + doneButton.style.background = 'linear-gradient(to bottom, #0070B8, #003D66)'; + doneButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + doneButton.style.margin = "20px"; + doneButton.style.height = "40px"; + + var cancelButton = document.createElement('button'); + cancelButton.textContent = 'Cancel'; + cancelButton.style.padding = '10px 10px'; + cancelButton.style.border = 'none'; + cancelButton.style.borderRadius = '5px'; + cancelButton.style.fontFamily = 'Arial, sans-serif'; + cancelButton.style.fontSize = '16px'; + cancelButton.style.fontWeight = 'bold'; + cancelButton.style.color = '#fff'; + cancelButton.style.background = 'linear-gradient(to bottom, #ff70B8, #ff3D66)'; + cancelButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + cancelButton.style.margin = "20px"; + cancelButton.style.height = "40px"; + + const w = node.widgets.find((w) => w.name == 'picks'); + let prev_selected = w.value.split(',').map(function(item) { + return parseInt(item, 10); + }); + + let images = []; + doneButton.onclick = () => { + var result = ''; + for(let i in images) { + if(images[i].isSelected) { + if(result != '') + result += ', '; + + result += (parseInt(i)+1); + } + } + + w.value = result; + + gallery.parentNode.removeChild(gallery); + } + + cancelButton.onclick = () => { + gallery.parentNode.removeChild(gallery); + } + + var panel = document.createElement('div'); + panel.style.clear = 'both'; + panel.style.width = '100%'; + panel.style.height = '40px'; + panel.style.justifyContent = 'center'; + panel.style.alignItems = 'center'; + panel.style.display = 'flex'; + panel.appendChild(doneButton); + panel.appendChild(cancelButton); + gallery.appendChild(panel); + + var hint = document.createElement('label'); + hint.style.position = 'absolute'; + hint.innerHTML = 'Click: Toggle Selection
<BR>
Ctrl-click: Single Selection';
+    gallery.appendChild(hint);
+
+    let max_size = 300;
+
+    for(let i=0; i<cnt; i++) {
+        let image = document.createElement('img');
+        // NOTE: the exact image URL was lost from this copy of the diff; it is assumed
+        // to be the picker's companion view endpoint for the i-th candidate segment.
+        image.src = api.apiURL(`/impact/segs/picker/view?id=${node.id}&idx=${i}`);
+        image.style.margin = '10px';
+        image.draggable = false;
+        images.push(image);
+
+        if(prev_selected.includes(i+1)) {
+            image.isSelected = true;
+            image.style.border = '2px solid #006699';
+        }
+
+        image.onload = function() {
+            let ratio = 1.0;
+            if(image.naturalWidth > image.naturalHeight) {
+                ratio = max_size/image.naturalWidth;
+            }
+            else {
+                ratio = max_size/image.naturalHeight;
+            }
+
+            let width = image.naturalWidth * ratio;
+            let height = image.naturalHeight * ratio;
+
+            if(width < height) {
+                this.style.marginLeft = (200-width)/2+"px";
+            }
+            else {
+                this.style.marginTop = (200-height)/2+"px";
+            }
+
+            this.style.width = width+"px";
+            this.style.height = height+"px";
+            this.style.objectFit = 'cover';
+        }
+
+        image.addEventListener('click', function(event) {
+            if(event.ctrlKey) {
+                for(let i in images) {
+                    if(images[i].isSelected) {
+                        images[i].style.border = 'none';
+                        images[i].isSelected = false;
+                    }
+                }
+
+                image.style.border = '2px solid #006699';
+                image.isSelected = true;
+
+                return;
+            }
+
+            if(image.isSelected) {
+                image.style.border = 'none';
+                image.isSelected = false;
+            }
+            else {
+                image.style.border = '2px solid #006699';
+                image.isSelected = true;
+            }
+        });
+
+        gallery.appendChild(image);
+    }
+
+    document.body.appendChild(gallery);
+}
+
+
+app.registerExtension({
+    name: "Comfy.Impack.Picker",
+
+    nodeCreated(node, app) {
+        if(node.comfyClass == "ImpactSEGSPicker") {
+            node.addWidget("button", "pick", "image", () => {
+                open_picker(node);
+            });
+        }
+    }
+});
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-wildcard.js b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-wildcard.js
new file mode 100644
index 0000000000000000000000000000000000000000..ab7cd1391a82a3b0b572bb79737dfccede717b89
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/js/impact-wildcard.js
@@ -0,0 +1,16 @@
+import { ComfyApp, app } from "../../scripts/app.js";
+import { api } from "../../scripts/api.js";
+
+let refresh_btn = document.getElementById('comfy-refresh-button');
+let refresh_btn2 = document.querySelector('button[title="Refresh widgets in nodes to find new models or files"]');
+
+let orig = refresh_btn.onclick;
+
+refresh_btn.onclick = function() {
+    orig();
+    api.fetchApi('/impact/wildcards/refresh');
+};
+
+refresh_btn2.addEventListener('click', function() {
+    api.fetchApi('/impact/wildcards/refresh');
+});
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/latent.png b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/latent.png
new file mode 100644
index 0000000000000000000000000000000000000000..19fed324a25a7e1a2252400e7752ce5586742429
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/latent.png differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18601250f91112eafaf692817e65bc80940da148
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8698adc0a7d57ec5e0c3cfda52134e54818d288a
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-310.pyc differ
diff --git 
a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bff1c7be56685f78349a4ba07cae10634ec76c81 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c6f739848f61418fe0560c631e5d714ea89fdd6 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..157a7b38140c762f23070d6ec74f292e236ac842 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ffe7b6467ff072eedc525af191628d9f48f0c4c Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baf3048b501ccb7ef4aecf90e569d60c8bd874fb Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e721ad80d487c925289831b18fceb4af182b8c Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02eb01a0cc88bd7d215e0a8c015739c60d8fa822 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85195c6ce237104d8c47da81c9ae5813ce7c8b3e Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc differ diff --git 
a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..489edf7882c4d4b9d2539ba4e5be3575fb4593aa Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b8dbe484e65fca40a67bfe38d0fc76f22dd7bb8 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f82af53acc782620888b366420c2201d45c41d5 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d43c0bc08b86c5cf2e40eba7d2c2efa4eb7bc551 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45422ad43afbf51652465a13c34113ef6ef08538 Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91b747efcc2b889eb7ed1639460126ed125fa4ff Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fae43e4df3781a0f4a2c23dcc47cfba3e817a7ac Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-310.pyc differ diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2dda8da5a5f71e0a14c1c454bf84091e04b71cc Binary files /dev/null and 
b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef2993a717225c548e6afc8a1b9816bcb61d0de7
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfe7bbf780c2467b9c343fe7ecc70440511ab2cd
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36d00bb295a94bb0ca6ed03e5ed885d4a7530014
Binary files /dev/null and b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc differ
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..49d5f5c957502b66f7813127000d67b432cd14f2
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py
@@ -0,0 +1,12 @@
+import sys
+import subprocess
+
+
+def ensure_onnx_package():
+    try:
+        import onnxruntime
+    except Exception:
+        if "python_embeded" in sys.executable or "python_embedded" in sys.executable:
+            subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'install', 'onnxruntime'])
+        else:
+            subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'onnxruntime'])
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..69b40ae01acf71c4e16f1e5d37043be3bc99da56
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py
@@ -0,0 +1,178 @@
+from nodes import MAX_RESOLUTION
+from impact.utils import *
+import impact.core as core
+from impact.core import SEG
+from impact.segs_nodes import SEGSPaste
+
+
+class SEGSDetailerForAnimateDiff:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "image_frames": ("IMAGE", ),
+                    "segs": ("SEGS", ),
+                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
+                    "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
+                    "scheduler": (core.SCHEDULERS,),
+                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, 
"step": 0.01}), + "basic_pipe": ("BASIC_PIPE",), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("SEGS", "IMAGE") + RETURN_NAMES = ("segs", "cnet_images") + OUTPUT_IS_LIST = (False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, noise_mask_feather=0, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image_frames.shape) + + new_segs = [] + cnet_image_list = [] + + for seg in segs[1]: + cropped_image_frames = None + + for image in image_frames: + image = image.unsqueeze(0) + cropped_image = seg.cropped_image if seg.cropped_image is not None else crop_tensor4(image, seg.crop_region) + cropped_image = to_tensor(cropped_image) + if cropped_image_frames is None: + cropped_image_frames = cropped_image + else: + cropped_image_frames = torch.concat((cropped_image_frames, cropped_image), dim=0) + + cropped_image_frames = cropped_image_frames.cpu().numpy() + + # It is assumed that AnimateDiff does not support conditioning masks based on test results, but it will be added for future consideration. + cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + + cropped_negative = [ + [condition, { + k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + + enhanced_image_tensor, cnet_images = core.enhance_detail_for_animatediff(cropped_image_frames, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + cropped_positive, cropped_negative, denoise, seg.cropped_mask, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, + noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt) + if cnet_images is not None: + cnet_image_list.extend(cnet_images) + + if enhanced_image_tensor is None: + new_cropped_image = cropped_image_frames + else: + new_cropped_image = enhanced_image_tensor.cpu().numpy() + + new_seg = SEG(new_cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs), cnet_image_list + + def doit(self, image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + segs, cnet_images = SEGSDetailerForAnimateDiff.do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, 
sampler_name, + scheduler, denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt, + noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + if len(cnet_images) == 0: + cnet_images = [empty_pil_tensor()] + + return (segs, cnet_images) + + +class DetailerForEachPipeForAnimateDiff: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "image_frames": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "basic_pipe": ("BASIC_PIPE", ), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE") + RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def doit(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, basic_pipe, refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, + noise_mask_feather=0, scheduler_func_opt=None): + + enhanced_segs = [] + cnet_image_list = [] + + for sub_seg in segs[1]: + single_seg = segs[0], [sub_seg] + enhanced_seg, cnet_images = SEGSDetailerForAnimateDiff().do_detail(image_frames, single_seg, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt, noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + image_frames = SEGSPaste.doit(image_frames, enhanced_seg, feather, alpha=255)[0] + + if cnet_images is not None: + cnet_image_list.extend(cnet_images) + + if detailer_hook is not None: + image_frames = detailer_hook.post_paste(image_frames) + + enhanced_segs += enhanced_seg[1] + + new_segs = segs[0], enhanced_segs + return image_frames, new_segs, basic_pipe, cnet_image_list diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..27e69d80024006bb8b16dc50cce285dcd333ee60 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py @@ -0,0 +1,303 @@ +import os +from PIL import ImageOps +from impact.utils import * +import latent_preview + +# NOTE: this should not be `from . import core`. +# I don't know why but... 'from .' and 'from impact' refer to different core modules. +# This separates global variables of the core module and breaks the preview bridge. 
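+# A minimal illustration of the mechanism (hypothetical paths, not part of this module):
+#
+#     import sys
+#     import impact.core        # loaded and cached as 'impact.core'
+#     import core               # the same file loaded AGAIN as 'core' when its
+#                               # directory is also on sys.path
+#     sys.modules['core'] is sys.modules['impact.core']   # -> False
+#
+# Python caches modules by the name they were imported under, so one file can
+# yield two distinct module objects, each with its own module-level globals
+# (preview_bridge_image_id_map, preview_bridge_cache, ...).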
+from impact import core +# <-- +import random + + +class PreviewBridge: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + "image": ("STRING", {"default": ""}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + + FUNCTION = "doit" + + OUTPUT_NODE = True + + CATEGORY = "ImpactPack/Util" + + def __init__(self): + super().__init__() + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prev_hash = None + + @staticmethod + def load_image(pb_id): + is_fail = False + if pb_id not in core.preview_bridge_image_id_map: + is_fail = True + + image_path, ui_item = core.preview_bridge_image_id_map[pb_id] + + if not os.path.isfile(image_path): + is_fail = True + + if not is_fail: + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + else: + image = empty_pil_tensor() + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + ui_item = { + "filename": 'empty.png', + "subfolder": '', + "type": 'temp' + } + + return image, mask.unsqueeze(0), ui_item + + def doit(self, images, image, unique_id): + need_refresh = False + + if unique_id not in core.preview_bridge_cache: + need_refresh = True + + elif core.preview_bridge_cache[unique_id][0] is not images: + need_refresh = True + + if not need_refresh: + pixels, mask, path_item = PreviewBridge.load_image(image) + image = [path_item] + else: + res = nodes.PreviewImage().save_images(images, filename_prefix="PreviewBridge/PB-") + image2 = res['ui']['images'] + pixels = images + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', image2[0]['filename']) + core.set_previewbridge_image(unique_id, path, image2[0]) + core.preview_bridge_image_id_map[image] = (path, image2[0]) + core.preview_bridge_image_name_map[unique_id, path] = (image, image2[0]) + core.preview_bridge_cache[unique_id] = (images, image2) + + image = image2 + + return { + "ui": {"images": image}, + "result": (pixels, mask, ), + } + + +def decode_latent(latent, preview_method, vae_opt=None): + if vae_opt is not None: + image = nodes.VAEDecode().decode(vae_opt, latent)[0] + return image + + from comfy.cli_args import LatentPreviewMethod + import comfy.latent_formats as latent_formats + + if preview_method.startswith("TAE"): + decoder_name = None + + if preview_method == "TAESD15": + decoder_name = "taesd" + elif preview_method == 'TAESDXL': + decoder_name = "taesdxl" + elif preview_method == 'TAESD3': + decoder_name = "taesd3" + + if decoder_name: + vae = nodes.VAELoader().load_vae(decoder_name)[0] + image = nodes.VAEDecode().decode(vae, latent)[0] + return image + + if preview_method == "Latent2RGB-SD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SDXL": + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD3": + latent_format = latent_formats.SD3() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD-X4": + latent_format = latent_formats.SD_X4() + method = LatentPreviewMethod.Latent2RGB + elif preview_method 
== "Latent2RGB-Playground-2.5": + latent_format = latent_formats.SDXL_Playground_2_5() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SC-Prior": + latent_format = latent_formats.SC_Prior() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SC-B": + latent_format = latent_formats.SC_B() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-FLUX.1": + latent_format = latent_formats.Flux() + method = LatentPreviewMethod.Latent2RGB + else: + print(f"[Impact Pack] PreviewBridgeLatent: '{preview_method}' is unsupported preview method.") + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + + previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) + samples = latent_format.process_in(latent['samples']) + + pil_image = previewer.decode_latent_to_preview(samples) + pixels_size = pil_image.size[0]*8, pil_image.size[1]*8 + resized_image = pil_image.resize(pixels_size, resample=LANCZOS) + + return to_tensor(resized_image).unsqueeze(0) + + +class PreviewBridgeLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent": ("LATENT",), + "image": ("STRING", {"default": ""}), + "preview_method": (["Latent2RGB-SD3", "Latent2RGB-SDXL", "Latent2RGB-SD15", + "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5", + "Latent2RGB-SC-Prior", "Latent2RGB-SC-B", + "Latent2RGB-FLUX.1", + "TAESD3", "TAESDXL", "TAESD15"],), + }, + "optional": { + "vae_opt": ("VAE", ) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT", "MASK", ) + + FUNCTION = "doit" + + OUTPUT_NODE = True + + CATEGORY = "ImpactPack/Util" + + def __init__(self): + super().__init__() + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prev_hash = None + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + + @staticmethod + def load_image(pb_id): + is_fail = False + if pb_id not in core.preview_bridge_image_id_map: + is_fail = True + + image_path, ui_item = core.preview_bridge_image_id_map[pb_id] + + if not os.path.isfile(image_path): + is_fail = True + + if not is_fail: + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = None + else: + image = empty_pil_tensor() + mask = None + ui_item = { + "filename": 'empty.png', + "subfolder": '', + "type": 'temp' + } + + return image, mask, ui_item + + def doit(self, latent, image, preview_method, vae_opt=None, unique_id=None): + latent_channels = latent['samples'].shape[1] + preview_method_channels = 16 if 'SD3' in preview_method or 'SC-Prior' in preview_method or 'FLUX.1' in preview_method else 4 + + if vae_opt is None and latent_channels != preview_method_channels: + print(f"[PreviewBridgeLatent] The version of latent is not compatible with preview_method.\nSD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.") + raise Exception("The version of latent is not compatible with preview_method.
SD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.") + + need_refresh = False + + if unique_id not in core.preview_bridge_cache: + need_refresh = True + + elif (core.preview_bridge_cache[unique_id][0] is not latent + or (vae_opt is None and core.preview_bridge_cache[unique_id][2] is not None) + or (vae_opt is None and core.preview_bridge_cache[unique_id][1] != preview_method) + or (vae_opt is not None and core.preview_bridge_cache[unique_id][2] is not vae_opt)): + need_refresh = True + + if not need_refresh: + pixels, mask, path_item = PreviewBridge.load_image(image) + + if mask is None: + mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0) + if 'noise_mask' in latent: + res_latent = latent.copy() + del res_latent['noise_mask'] + else: + res_latent = latent + else: + res_latent = latent.copy() + res_latent['noise_mask'] = mask + + res_image = [path_item] + else: + decoded_image = decode_latent(latent, preview_method, vae_opt) + + if 'noise_mask' in latent: + mask = latent['noise_mask'].squeeze(0) # 4D mask -> 3D mask + + decoded_pil = to_pil(decoded_image) + + inverted_mask = 1 - mask # invert + resized_mask = resize_mask(inverted_mask, (decoded_image.shape[1], decoded_image.shape[2])) + result_pil = apply_mask_alpha_to_pil(decoded_pil, resized_mask) + + full_output_folder, filename, counter, _, _ = folder_paths.get_save_image_path("PreviewBridge/PBL-"+self.prefix_append, folder_paths.get_temp_directory(), result_pil.size[0], result_pil.size[1]) + file = f"{filename}_{counter}.png" + result_pil.save(os.path.join(full_output_folder, file), compress_level=4) + res_image = [{ + 'filename': file, + 'subfolder': 'PreviewBridge', + 'type': 'temp', + }] + else: + mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0) + res = nodes.PreviewImage().save_images(decoded_image, filename_prefix="PreviewBridge/PBL-") + res_image = res['ui']['images'] + + path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', res_image[0]['filename']) + core.set_previewbridge_image(unique_id, path, res_image[0]) + core.preview_bridge_image_id_map[image] = (path, res_image[0]) + core.preview_bridge_image_name_map[unique_id, path] = (image, res_image[0]) + core.preview_bridge_cache[unique_id] = (latent, preview_method, vae_opt, res_image) + + res_latent = latent + + return { + "ui": {"images": res_image}, + "result": (res_latent, mask, ), + } diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py new file mode 100644 index 0000000000000000000000000000000000000000..3c5a39f2b9cd3ec18d1e69a185d632862541b1d8 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py @@ -0,0 +1,68 @@ +import configparser +import os + +version_code = [6, 0, 2] +version = f"V{version_code[0]}.{version_code[1]}" + (f'.{version_code[2]}' if len(version_code) > 2 else '') + +dependency_version = 22 + +my_path = os.path.dirname(__file__) +old_config_path = os.path.join(my_path, "impact-pack.ini") +config_path = os.path.join(my_path, "..", "..", "impact-pack.ini") +latent_letter_path = os.path.join(my_path, "..", "..", "latent.png") + + +def write_config(): + config = configparser.ConfigParser() + config['default'] = { + 'dependency_version': str(dependency_version), + 'mmdet_skip': str(get_config()['mmdet_skip']), + 'sam_editor_cpu': str(get_config()['sam_editor_cpu']), + 'sam_editor_model': 
get_config()['sam_editor_model'],
+        'custom_wildcards': get_config()['custom_wildcards'],
+        'disable_gpu_opencv': get_config()['disable_gpu_opencv'],
+    }
+    with open(config_path, 'w') as configfile:
+        config.write(configfile)
+
+
+def read_config():
+    try:
+        config = configparser.ConfigParser()
+        config.read(config_path)
+        default_conf = config['default']
+
+        if not os.path.exists(default_conf['custom_wildcards']):
+            print(f"[WARN] ComfyUI-Impact-Pack: custom_wildcards path not found: {default_conf['custom_wildcards']}. Using default path.")
+            default_conf['custom_wildcards'] = os.path.join(my_path, "..", "..", "custom_wildcards")
+
+        return {
+            'dependency_version': int(default_conf['dependency_version']),
+            'mmdet_skip': default_conf['mmdet_skip'].lower() == 'true' if 'mmdet_skip' in default_conf else True,
+            'sam_editor_cpu': default_conf['sam_editor_cpu'].lower() == 'true' if 'sam_editor_cpu' in default_conf else False,
+            'sam_editor_model': default_conf['sam_editor_model'].lower() if 'sam_editor_model' in default_conf else 'sam_vit_b_01ec64.pth',
+            'custom_wildcards': default_conf['custom_wildcards'] if 'custom_wildcards' in default_conf else os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
+            'disable_gpu_opencv': default_conf['disable_gpu_opencv'].lower() == 'true' if 'disable_gpu_opencv' in default_conf else True
+        }
+
+    except Exception:
+        return {
+            'dependency_version': 0,
+            'mmdet_skip': True,
+            'sam_editor_cpu': False,
+            'sam_editor_model': 'sam_vit_b_01ec64.pth',
+            'custom_wildcards': os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
+            'disable_gpu_opencv': True
+        }
+
+
+cached_config = None
+
+
+def get_config():
+    global cached_config
+
+    if cached_config is None:
+        cached_config = read_config()
+
+    return cached_config
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..40fff48150bb57d2129f8047a9a4490f4e41f755
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py
@@ -0,0 +1,2178 @@
+import copy
+import os
+import warnings
+
+import numpy
+import torch
+from segment_anything import SamPredictor
+
+from comfy_extras.nodes_custom_sampler import Noise_RandomNoise
+from impact.utils import *
+from collections import namedtuple
+import numpy as np
+from skimage.measure import label
+
+import nodes
+import comfy_extras.nodes_upscale_model as model_upscale
+from server import PromptServer
+import comfy
+import impact.wildcards as wildcards
+import math
+import cv2
+import time
+from comfy import model_management
+from impact import utils
+from impact import impact_sampling
+from concurrent.futures import ThreadPoolExecutor
+
+try:
+    from comfy_extras import nodes_differential_diffusion
+except Exception:
+    print("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
+    raise Exception("[Impact Pack] ComfyUI is an outdated version.")
+
+
+SEG = namedtuple("SEG",
+                 ['cropped_image', 'cropped_mask', 'confidence', 'crop_region', 'bbox', 'label', 'control_net_wrapper'],
+                 defaults=[None])
+
+pb_id_cnt = time.time()
+preview_bridge_image_id_map = {}
+preview_bridge_image_name_map = {}
+preview_bridge_cache = {}
+current_prompt = None
+
+SCHEDULERS = comfy.samplers.KSampler.SCHEDULERS + ['AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]']
+
+
+def 
set_previewbridge_image(node_id, file, item):
+    global pb_id_cnt
+
+    if (node_id, file) in preview_bridge_image_name_map:
+        pb_id, _ = preview_bridge_image_name_map[node_id, file]
+        if pb_id.startswith(f"${node_id}"):
+            return pb_id
+
+    pb_id = f"${node_id}-{pb_id_cnt}"
+    preview_bridge_image_id_map[pb_id] = (file, item)
+    preview_bridge_image_name_map[node_id, file] = (pb_id, item)
+    pb_id_cnt += 1
+
+    return pb_id
+
+
+def erosion_mask(mask, grow_mask_by):
+    mask = make_2d_mask(mask)
+
+    w = mask.shape[1]
+    h = mask.shape[0]
+
+    device = comfy.model_management.get_torch_device()
+    mask = mask.clone().to(device)
+    mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(h, w), mode="bilinear").to(device)
+    if grow_mask_by == 0:
+        mask_erosion = mask2
+    else:
+        kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)).to(device)
+        padding = math.ceil((grow_mask_by - 1) / 2)
+
+        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1)
+
+    return mask_erosion[:, :, :h, :w].round().cpu()
+
+
+# CREDIT: https://github.com/BlenderNeko/ComfyUI_Noise/blob/afb14757216257b12268c91845eac248727a55e2/nodes.py#L68
+#         https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
+def slerp(val, low, high):
+    dims = low.shape
+
+    low = low.reshape(dims[0], -1)
+    high = high.reshape(dims[0], -1)
+
+    low_norm = low/torch.norm(low, dim=1, keepdim=True)
+    high_norm = high/torch.norm(high, dim=1, keepdim=True)
+
+    low_norm[low_norm != low_norm] = 0.0
+    high_norm[high_norm != high_norm] = 0.0
+
+    omega = torch.acos((low_norm*high_norm).sum(1))
+    so = torch.sin(omega)
+    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+
+    return res.reshape(dims)
+
+
+def mix_noise(from_noise, to_noise, strength, variation_method):
+    if variation_method == 'slerp':
+        mixed_noise = slerp(strength, from_noise, to_noise)
+    else:
+        # linear
+        mixed_noise = (1 - strength) * from_noise + strength * to_noise
+
+        # NOTE: Since the variance of the Gaussian noise in mixed_noise has changed, it must be corrected through scaling.
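+        # To see why: for independent unit-variance Gaussians X and Y,
+        #     Var((1 - s) * X + s * Y) = (1 - s)**2 + s**2
+        # e.g. s = 0.5 gives variance 0.5 (std ~0.707), so dividing by
+        # sqrt((1 - s)**2 + s**2) restores unit variance. The slerp branch above
+        # needs no such correction because it interpolates along the sphere and
+        # roughly preserves the norm of the inputs.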
+ scale_factor = math.sqrt((1 - strength) ** 2 + strength ** 2) + mixed_noise /= scale_factor + + return mixed_noise + + +class REGIONAL_PROMPT: + def __init__(self, mask, sampler, variation_seed=0, variation_strength=0.0, variation_method='linear'): + mask = make_2d_mask(mask) + + self.mask = mask + self.sampler = sampler + self.mask_erosion = None + self.erosion_factor = None + self.variation_seed = variation_seed + self.variation_strength = variation_strength + self.variation_method = variation_method + + def clone_with_sampler(self, sampler): + rp = REGIONAL_PROMPT(self.mask, sampler) + rp.mask_erosion = self.mask_erosion + rp.erosion_factor = self.erosion_factor + rp.variation_seed = self.variation_seed + rp.variation_strength = self.variation_strength + rp.variation_method = self.variation_method + return rp + + def get_mask_erosion(self, factor): + if self.mask_erosion is None or self.erosion_factor != factor: + self.mask_erosion = erosion_mask(self.mask, factor) + self.erosion_factor = factor + + return self.mask_erosion + + def touch_noise(self, noise): + if self.variation_strength > 0.0: + mask = utils.make_3d_mask(self.mask) + mask = utils.resize_mask(mask, (noise.shape[2], noise.shape[3])).unsqueeze(0) + + regional_noise = Noise_RandomNoise(self.variation_seed).generate_noise({'samples': noise}) + mixed_noise = mix_noise(noise, regional_noise, self.variation_strength, variation_method=self.variation_method) + + return (mask == 1).float() * mixed_noise + (mask == 0).float() * noise + + return noise + + +class NO_BBOX_DETECTOR: + pass + + +class NO_SEGM_DETECTOR: + pass + + +def create_segmasks(results): + bboxs = results[1] + segms = results[2] + confidence = results[3] + + results = [] + for i in range(len(segms)): + item = (bboxs[i], segms[i].astype(np.float32), confidence[i]) + results.append(item) + return results + + +def gen_detection_hints_from_mask_area(x, y, mask, threshold, use_negative): + mask = make_2d_mask(mask) + + points = [] + plabs = [] + + # minimum sampling step >= 3 + y_step = max(3, int(mask.shape[0] / 20)) + x_step = max(3, int(mask.shape[1] / 20)) + + for i in range(0, len(mask), y_step): + for j in range(0, len(mask[i]), x_step): + if mask[i][j] > threshold: + points.append((x + j, y + i)) + plabs.append(1) + elif use_negative and mask[i][j] == 0: + points.append((x + j, y + i)) + plabs.append(0) + + return points, plabs + + +def gen_negative_hints(w, h, x1, y1, x2, y2): + npoints = [] + nplabs = [] + + # minimum sampling step >= 3 + y_step = max(3, int(w / 20)) + x_step = max(3, int(h / 20)) + + for i in range(10, h - 10, y_step): + for j in range(10, w - 10, x_step): + if not (x1 - 10 <= j and j <= x2 + 10 and y1 - 10 <= i and i <= y2 + 10): + npoints.append((j, i)) + nplabs.append(0) + + return npoints, nplabs + + +def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg, + sampler_name, + scheduler, positive, negative, denoise, noise_mask, force_inpaint, + wildcard_opt=None, wildcard_opt_concat_mode=None, + detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, + refiner_negative=None, control_net_wrapper=None, cycle=1, + inpaint_model=False, noise_mask_feather=0, scheduler_func=None): + + if noise_mask is not None: + noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather) + noise_mask = noise_mask.squeeze(3) + + if noise_mask_feather > 0: + model = nodes_differential_diffusion.DifferentialDiffusion().apply(model)[0] + + if 
wildcard_opt is not None and wildcard_opt != "":
+        model, _, wildcard_positive = wildcards.process_with_loras(wildcard_opt, model, clip)
+
+        if wildcard_opt_concat_mode == "concat":
+            positive = nodes.ConditioningConcat().concat(positive, wildcard_positive)[0]
+        else:
+            positive = wildcard_positive
+            positive = [positive[0].copy()]
+            if 'pooled_output' in wildcard_positive[0][1]:
+                positive[0][1]['pooled_output'] = wildcard_positive[0][1]['pooled_output']
+            elif 'pooled_output' in positive[0][1]:
+                del positive[0][1]['pooled_output']
+
+    h = image.shape[1]
+    w = image.shape[2]
+
+    bbox_h = bbox[3] - bbox[1]
+    bbox_w = bbox[2] - bbox[0]
+
+    # Skip processing if the detected bbox is already larger than the guide_size
+    if not force_inpaint and bbox_h >= guide_size and bbox_w >= guide_size:
+        print("Detailer: segment skip (already large enough)")
+        return None, None
+
+    if guide_size_for_bbox:  # == "bbox"
+        # Scale up based on the smaller dimension between width and height.
+        upscale = guide_size / min(bbox_w, bbox_h)
+    else:
+        # for cropped_size
+        upscale = guide_size / min(w, h)
+
+    new_w = int(w * upscale)
+    new_h = int(h * upscale)
+
+    # safeguard
+    if 'aitemplate_keep_loaded' in model.model_options:
+        max_size = min(4096, max_size)
+
+    if new_w > max_size or new_h > max_size:
+        upscale *= max_size / max(new_w, new_h)
+        new_w = int(w * upscale)
+        new_h = int(h * upscale)
+
+    if not force_inpaint:
+        if upscale <= 1.0:
+            print(f"Detailer: segment skip [determined upscale factor={upscale}]")
+            return None, None
+
+        if new_w == 0 or new_h == 0:
+            print(f"Detailer: segment skip [zero size={new_w, new_h}]")
+            return None, None
+    else:
+        if upscale <= 1.0 or new_w == 0 or new_h == 0:
+            print("Detailer: force inpaint")
+            upscale = 1.0
+            new_w = w
+            new_h = h
+
+    if detailer_hook is not None:
+        new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h)
+
+    print(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}")
+
+    # upscale
+    upscaled_image = tensor_resize(image, new_w, new_h)
+
+    cnet_pils = None
+    if control_net_wrapper is not None:
+        positive, negative, cnet_pils = control_net_wrapper.apply(positive, negative, upscaled_image, noise_mask)
+        model, cnet_pils2 = control_net_wrapper.doit_ipadapter(model)
+        cnet_pils.extend(cnet_pils2)
+
+    # prepare mask
+    if noise_mask is not None and inpaint_model:
+        positive, negative, latent_image = nodes.InpaintModelConditioning().encode(positive, negative, upscaled_image, vae, noise_mask)
+    else:
+        latent_image = to_latent_image(upscaled_image, vae)
+        if noise_mask is not None:
+            latent_image['noise_mask'] = noise_mask
+
+    if detailer_hook is not None:
+        latent_image = detailer_hook.post_encode(latent_image)
+
+    refined_latent = latent_image
+
+    # ksampler
+    for i in range(0, cycle):
+        if detailer_hook is not None:
+            detailer_hook.set_steps((i, cycle))
+
+            refined_latent = detailer_hook.cycle_latent(refined_latent)
+
+            model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, upscaled_latent2, denoise2 = \
+                detailer_hook.pre_ksample(model, seed+i, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise)
+            noise, is_touched = detailer_hook.get_custom_noise(seed+i, torch.zeros(latent_image['samples'].size()), is_touched=False)
+            if not is_touched:
+                noise = None
+        else:
+            model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, upscaled_latent2, denoise2 = \
+                model, seed + i, steps, cfg, sampler_name, scheduler, 
positive, negative, latent_image, denoise
+            noise = None
+
+        refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2,
+                                                          refined_latent, denoise2, refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative,
+                                                          noise=noise, scheduler_func=scheduler_func)
+
+    if detailer_hook is not None:
+        refined_latent = detailer_hook.pre_decode(refined_latent)
+
+    # non-latent downscale - latent downscale causes bad quality
+    try:
+        # try to decode the image normally
+        refined_image = vae.decode(refined_latent['samples'])
+    except Exception:
+        # usually an out-of-memory error from the decode, so try a tiled approach
+        refined_image = vae.decode_tiled(refined_latent["samples"], tile_x=64, tile_y=64)
+
+    if detailer_hook is not None:
+        refined_image = detailer_hook.post_decode(refined_image)
+
+    # downscale
+    refined_image = tensor_resize(refined_image, w, h)
+
+    # prevent mixing of devices
+    refined_image = refined_image.cpu()
+
+    # don't convert back to latent - the latent round trip degrades the image;
+    # preserving pixels is much better
+    return refined_image, cnet_pils
+
+
+def enhance_detail_for_animatediff(image_frames, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg,
+                                   sampler_name,
+                                   scheduler, positive, negative, denoise, noise_mask,
+                                   wildcard_opt=None, wildcard_opt_concat_mode=None,
+                                   detailer_hook=None,
+                                   refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None,
+                                   refiner_negative=None, control_net_wrapper=None, noise_mask_feather=0, scheduler_func=None):
+    if noise_mask is not None:
+        noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather)
+        noise_mask = noise_mask.squeeze(3)
+
+    if noise_mask_feather > 0:
+        model = nodes_differential_diffusion.DifferentialDiffusion().apply(model)[0]
+
+    if wildcard_opt is not None and wildcard_opt != "":
+        model, _, wildcard_positive = wildcards.process_with_loras(wildcard_opt, model, clip)
+
+        if wildcard_opt_concat_mode == "concat":
+            positive = nodes.ConditioningConcat().concat(positive, wildcard_positive)[0]
+        else:
+            positive = wildcard_positive
+
+    h = image_frames.shape[1]
+    w = image_frames.shape[2]
+
+    bbox_h = bbox[3] - bbox[1]
+    bbox_w = bbox[2] - bbox[0]
+
+    if guide_size_for_bbox:  # == "bbox"
+        # Scale up based on the smaller dimension between width and height.
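+        # Worked example: guide_size=512 with a 160x240 bbox gives
+        # upscale = 512 / min(160, 240) = 3.2, i.e. the crop is enlarged until
+        # the bbox's short side reaches guide_size (subject to the max_size clamp below).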
+ upscale = guide_size / min(bbox_w, bbox_h) + else: + # for cropped_size + upscale = guide_size / min(w, h) + + new_w = int(w * upscale) + new_h = int(h * upscale) + + # safeguard + if 'aitemplate_keep_loaded' in model.model_options: + max_size = min(4096, max_size) + + if new_w > max_size or new_h > max_size: + upscale *= max_size / max(new_w, new_h) + new_w = int(w * upscale) + new_h = int(h * upscale) + + if upscale <= 1.0 or new_w == 0 or new_h == 0: + print("Detailer: force inpaint") + upscale = 1.0 + new_w = w + new_h = h + + if detailer_hook is not None: + new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h) + + print(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}") + + # upscale the mask tensor to the sampling size using bilinear interpolation + if isinstance(noise_mask, np.ndarray): + noise_mask = torch.from_numpy(noise_mask) + + if len(noise_mask.shape) == 2: + noise_mask = noise_mask.unsqueeze(0) + + upscaled_mask = None + + for single_mask in noise_mask: + single_mask = single_mask.unsqueeze(0).unsqueeze(0) + upscaled_single_mask = torch.nn.functional.interpolate(single_mask, size=(new_h, new_w), mode='bilinear', align_corners=False) + upscaled_single_mask = upscaled_single_mask.squeeze(0) + + if upscaled_mask is None: + upscaled_mask = upscaled_single_mask + else: + upscaled_mask = torch.cat((upscaled_mask, upscaled_single_mask), dim=0) + + latent_frames = None + for image in image_frames: + image = torch.from_numpy(image).unsqueeze(0) + + # upscale + upscaled_image = tensor_resize(image, new_w, new_h) + + # ksampler + samples = to_latent_image(upscaled_image, vae)['samples'] + + if latent_frames is None: + latent_frames = samples + else: + latent_frames = torch.concat((latent_frames, samples), dim=0) + + cnet_images = None + if control_net_wrapper is not None: + positive, negative, cnet_images = control_net_wrapper.apply(positive, negative, torch.from_numpy(image_frames), noise_mask, use_acn=True) + + if len(upscaled_mask) != len(image_frames) and len(upscaled_mask) > 1: + print(f"[Impact Pack] WARN: DetailerForAnimateDiff - The number of mask frames({len(upscaled_mask)}) does not match the number of image frames({len(image_frames)}).
Combine the mask frames and apply.") + combined_mask = upscaled_mask[0].to(torch.uint8) + + for frame_mask in upscaled_mask[1:]: + combined_mask |= (frame_mask * 255).to(torch.uint8) + + combined_mask = (combined_mask/255.0).to(torch.float32) + + upscaled_mask = combined_mask.expand(len(image_frames), -1, -1) + upscaled_mask = utils.to_binary_mask(upscaled_mask, 0.1) + + latent = { + 'noise_mask': upscaled_mask, + 'samples': latent_frames + } + + if detailer_hook is not None: + latent = detailer_hook.post_encode(latent) + + refined_latent = impact_sampling.ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + latent, denoise, refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative, scheduler_func=scheduler_func) + + if detailer_hook is not None: + refined_latent = detailer_hook.pre_decode(refined_latent) + + refined_image_frames = None + for refined_sample in refined_latent['samples']: + refined_sample = refined_sample.unsqueeze(0) + + # non-latent downscale - latent downscale cause bad quality + refined_image = vae.decode(refined_sample) + + if refined_image_frames is None: + refined_image_frames = refined_image + else: + refined_image_frames = torch.concat((refined_image_frames, refined_image), dim=0) + + if detailer_hook is not None: + refined_image_frames = detailer_hook.post_decode(refined_image_frames) + + refined_image_frames = nodes.ImageScale().upscale(image=refined_image_frames, upscale_method='lanczos', width=w, height=h, crop='disabled')[0] + + return refined_image_frames, cnet_images + + +def composite_to(dest_latent, crop_region, src_latent): + x1 = crop_region[0] + y1 = crop_region[1] + + # composite to original latent + lc = nodes.LatentComposite() + orig_image = lc.composite(dest_latent, src_latent, x1, y1) + + return orig_image[0] + + +def sam_predict(predictor, points, plabs, bbox, threshold): + point_coords = None if not points else np.array(points) + point_labels = None if not plabs else np.array(plabs) + + box = np.array([bbox]) if bbox is not None else None + + cur_masks, scores, _ = predictor.predict(point_coords=point_coords, point_labels=point_labels, box=box) + + total_masks = [] + + selected = False + max_score = 0 + max_mask = None + for idx in range(len(scores)): + if scores[idx] > max_score: + max_score = scores[idx] + max_mask = cur_masks[idx] + + if scores[idx] >= threshold: + selected = True + total_masks.append(cur_masks[idx]) + else: + pass + + if not selected and max_mask is not None: + total_masks.append(max_mask) + + return total_masks + + +class SAMWrapper: + def __init__(self, model, is_auto_mode, safe_to_gpu=None): + self.model = model + self.safe_to_gpu = safe_to_gpu if safe_to_gpu is not None else SafeToGPU_stub() + self.is_auto_mode = is_auto_mode + + def prepare_device(self): + if self.is_auto_mode: + device = comfy.model_management.get_torch_device() + self.safe_to_gpu.to_device(self.model, device=device) + + def release_device(self): + if self.is_auto_mode: + self.model.to(device="cpu") + + def predict(self, image, points, plabs, bbox, threshold): + predictor = SamPredictor(self.model) + predictor.set_image(image, "RGB") + + return sam_predict(predictor, points, plabs, bbox, threshold) + + +class ESAMWrapper: + def __init__(self, model, device): + self.model = model + self.func_inference = nodes.NODE_CLASS_MAPPINGS['Yoloworld_ESAM_Zho'] + self.device = device + + def prepare_device(self): + pass + + def release_device(self): + pass + + def predict(self, image, points, plabs, bbox, 
threshold): + # normalize the device string per call instead of mutating self.device, + # which previously flipped to 'cuda' on the second call + device = 'cpu' if str(self.device).upper() == 'CPU' else 'cuda' + + detected_masks = self.func_inference.inference_sam_with_boxes(image=image, xyxy=[bbox], model=self.model, device=device) + return [detected_masks.squeeze(0)] + + +def make_sam_mask(sam, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + + if not hasattr(sam, 'sam_wrapper'): + raise Exception("[Impact Pack] Invalid SAMLoader is connected. Make sure the model comes from 'SAMLoader (Impact)'.\nKnown issue: The ComfyUI-YOLO node overrides the SAMLoader (Impact), making it unusable. You need to uninstall ComfyUI-YOLO.\n\n\n") + + sam_obj = sam.sam_wrapper + sam_obj.prepare_device() + + try: + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(segs[i].bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_obj.predict(image, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + # reuse the shared hint generator instead of duplicating the per-hint logic inline + points, plabs = generate_detection_hints(image, segs[i], center, detection_hint, dilated_bbox, + mask_hint_threshold, use_small_negative, + mask_hint_use_negative) + + detected_masks = sam_obj.predict(image, points, plabs, dilated_bbox, threshold) + total_masks += detected_masks + + # merge all collected masks + mask = combine_masks2(total_masks) + + finally:
sam_obj.release_device() + + if mask is not None: + mask = mask.float() + mask = dilate_mask(mask.cpu().numpy(), dilation) + mask = torch.from_numpy(mask) + else: + size = image.shape[0], image.shape[1] + mask = torch.zeros(size, dtype=torch.float32, device="cpu") # empty mask + + mask = utils.make_3d_mask(mask) + return mask + + +def generate_detection_hints(image, seg, center, detection_hint, dilated_bbox, mask_hint_threshold, use_small_negative, + mask_hint_use_negative): + [x1, y1, x2, y2] = dilated_bbox + + points = [] + plabs = [] + if detection_hint == "center-1": + points.append(center) + plabs = [1] # 1 = foreground point, 0 = background point + + elif detection_hint == "horizontal-2": + gap = (x2 - x1) / 3 + points.append((x1 + gap, center[1])) + points.append((x1 + gap * 2, center[1])) + plabs = [1, 1] + + elif detection_hint == "vertical-2": + gap = (y2 - y1) / 3 + points.append((center[0], y1 + gap)) + points.append((center[0], y1 + gap * 2)) + plabs = [1, 1] + + elif detection_hint == "rect-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, center[1])) + points.append((x1 + x_gap * 2, center[1])) + points.append((center[0], y1 + y_gap)) + points.append((center[0], y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "diamond-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, y1 + y_gap)) + points.append((x1 + x_gap * 2, y1 + y_gap)) + points.append((x1 + x_gap, y1 + y_gap * 2)) + points.append((x1 + x_gap * 2, y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "mask-point-bbox": + center = center_of_bbox(seg.bbox) + points.append(center) + plabs = [1] + + elif detection_hint == "mask-area": + points, plabs = gen_detection_hints_from_mask_area(seg.crop_region[0], seg.crop_region[1], + seg.cropped_mask, + mask_hint_threshold, use_small_negative) + + if mask_hint_use_negative == "Outter": + npoints, nplabs = gen_negative_hints(image.shape[0], image.shape[1], + seg.crop_region[0], seg.crop_region[1], + seg.crop_region[2], seg.crop_region[3]) + + points += npoints + plabs += nplabs + + return points, plabs + + +def convert_and_stack_masks(masks): + if len(masks) == 0: + return None + + mask_tensors = [] + for mask in masks: + mask_array = np.array(mask, dtype=np.uint8) + mask_tensor = torch.from_numpy(mask_array) + mask_tensors.append(mask_tensor) + + stacked_masks = torch.stack(mask_tensors, dim=0) + stacked_masks = stacked_masks.unsqueeze(1) + + return stacked_masks + + +def merge_and_stack_masks(stacked_masks, group_size): + if stacked_masks is None: + return None + + num_masks = stacked_masks.size(0) + merged_masks = [] + + for i in range(0, num_masks, group_size): + subset_masks = stacked_masks[i:i + group_size] + merged_mask = torch.any(subset_masks, dim=0) + merged_masks.append(merged_mask) + + if len(merged_masks) > 0: + merged_masks = torch.stack(merged_masks, dim=0) + + return merged_masks + + +def segs_scale_match(segs, target_shape): + h = segs[0][0] + w = segs[0][1] + + th = target_shape[1] + tw = target_shape[2] + + if (h == th and w == tw) or h == 0 or w == 0: + return segs + + rh = th / h + rw = tw / w + + new_segs = [] + for seg in segs[1]: + cropped_image = seg.cropped_image + cropped_mask = seg.cropped_mask + x1, y1, x2, y2 = seg.crop_region + bx1, by1, bx2, by2 = seg.bbox + + # x coordinates scale by the width ratio(rw), y coordinates by the height ratio(rh) + crop_region = int(x1*rw), int(y1*rh), int(x2*rw), int(y2*rh) + bbox = int(bx1*rw), int(by1*rh), int(bx2*rw), int(by2*rh) + new_w = crop_region[2] - crop_region[0] + new_h = crop_region[3] -
crop_region[1] + + if isinstance(cropped_mask, np.ndarray): + cropped_mask = torch.from_numpy(cropped_mask) + + if isinstance(cropped_mask, torch.Tensor) and len(cropped_mask.shape) == 3: + cropped_mask = torch.nn.functional.interpolate(cropped_mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False) + cropped_mask = cropped_mask.squeeze(0) + else: + cropped_mask = torch.nn.functional.interpolate(cropped_mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False) + cropped_mask = cropped_mask.squeeze(0).squeeze(0).numpy() + + if cropped_image is not None: + cropped_image = tensor_resize(cropped_image if isinstance(cropped_image, torch.Tensor) else torch.from_numpy(cropped_image), new_w, new_h) + cropped_image = cropped_image.numpy() + + new_seg = SEG(cropped_image, cropped_mask, seg.confidence, crop_region, bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return (th, tw), new_segs + + +# Used Python's slicing feature. stacked_masks[2::3] means starting from index 2, selecting every third tensor with a step size of 3. +# This allows for quickly obtaining the last tensor of every three tensors in stacked_masks. +def every_three_pick_last(stacked_masks): + selected_masks = stacked_masks[2::3] + return selected_masks + + +def make_sam_mask_segmented(sam, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + + if not hasattr(sam, 'sam_wrapper'): + raise Exception("[Impact Pack] Invalid SAMLoader is connected. Make sure 'SAMLoader (Impact)'.") + + sam_obj = sam.sam_wrapper + sam_obj.prepare_device() + + try: + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_obj.predict(image, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + points, plabs = generate_detection_hints(image, segs[i], center, detection_hint, dilated_bbox, + mask_hint_threshold, use_small_negative, + mask_hint_use_negative) + + detected_masks = sam_obj.predict(image, points, plabs, dilated_bbox, threshold) + + total_masks += detected_masks + + # merge every collected masks + mask = combine_masks2(total_masks) + + finally: + sam_obj.release_device() + + mask_working_device = torch.device("cpu") + + if mask is not None: + mask = mask.float() + mask = dilate_mask(mask.cpu().numpy(), dilation) + mask = torch.from_numpy(mask) + mask = mask.to(device=mask_working_device) + else: + # Extracting batch, height and width + height, width, _ = image.shape + mask = torch.zeros( + (height, width), dtype=torch.float32, device=mask_working_device + ) # empty mask + + stacked_masks = convert_and_stack_masks(total_masks) + + return (mask, merge_and_stack_masks(stacked_masks, group_size=3)) + # 
return every_three_pick_last(stacked_masks) + + +def segs_bitwise_and_mask(segs, mask): + mask = make_2d_mask(mask) + + if mask is None: + print("[SegsBitwiseAndMask] Cannot operate: MASK is empty.") + return ([],) + + items = [] + + mask = (mask.cpu().numpy() * 255).astype(np.uint8) + + for seg in segs[1]: + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def segs_bitwise_subtract_mask(segs, mask): + mask = make_2d_mask(mask) + + if mask is None: + print("[SegsBitwiseSubtractMask] Cannot operate: MASK is empty.") + return ([],) + + items = [] + + mask = (mask.cpu().numpy() * 255).astype(np.uint8) + + for seg in segs[1]: + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = cv2.subtract(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def apply_mask_to_each_seg(segs, masks): + if masks is None: + print("[apply_mask_to_each_seg] Cannot operate: MASK is empty.") + return (segs[0], [],) + + items = [] + + masks = masks.squeeze(1) + + for seg, mask in zip(segs[1], masks): + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = (mask.cpu().numpy() * 255).astype(np.uint8) + cropped_mask2 = cropped_mask2[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def dilate_segs(segs, factor): + if factor == 0: + return segs + + new_segs = [] + for seg in segs[1]: + new_mask = dilate_mask(seg.cropped_mask, factor) + new_seg = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return (segs[0], new_segs) + + +class ONNXDetector: + onnx_model = None + + def __init__(self, onnx_model): + self.onnx_model = onnx_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + try: + import impact.onnx as onnx + + h = image.shape[1] + w = image.shape[2] + + labels, scores, boxes = onnx.onnx_inference(image, self.onnx_model) + + # collect feasible items + result = [] + + for i in range(len(labels)): + if scores[i] > threshold: + item_bbox = boxes[i] + x1, y1, x2, y2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + + if detailer_hook is not None: + # let the detailer hook adjust the crop region + crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region) + + crop_x1, crop_y1, crop_x2, crop_y2 = crop_region + + # prepare cropped mask + cropped_mask = np.zeros((crop_y2 - crop_y1,
crop_x2 - crop_x1)) + cropped_mask[y1 - crop_y1:y2 - crop_y1, x1 - crop_x1:x2 - crop_x1] = 1 + cropped_mask = dilate_mask(cropped_mask, dilation) + + # make items. just convert the integer label to a string + item = SEG(None, cropped_mask, scores[i], crop_region, item_bbox, str(labels[i]), None) + result.append(item) + + shape = h, w + segs = shape, result + + if detailer_hook is not None and hasattr(detailer_hook, "post_detection"): + segs = detailer_hook.post_detection(segs) + + return segs + except Exception as e: + print(f"ONNXDetector: unable to execute.\n{e}") + pass + + def detect_combined(self, image, threshold, dilation): + return segs_to_combined_mask(self.detect(image, threshold, dilation, 1)) + + def setAux(self, x): + pass + + +def batch_mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size=1, label='A', crop_min_size=None, detailer_hook=None): + combined_mask = mask.max(dim=0).values + + segs = mask_to_segs(combined_mask, combined, crop_factor, bbox_fill, drop_size, label, crop_min_size, detailer_hook) + + new_segs = [] + for seg in segs[1]: + x1, y1, x2, y2 = seg.crop_region + cropped_mask = mask[:, y1:y2, x1:x2] + item = SEG(None, cropped_mask, 1.0, seg.crop_region, seg.bbox, label, None) + new_segs.append(item) + + return segs[0], new_segs + + +def mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size=1, label='A', crop_min_size=None, detailer_hook=None, is_contour=True): + drop_size = max(drop_size, 1) + if mask is None: + print("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + if isinstance(mask, np.ndarray): + pass # `mask` is already a NumPy array + else: + try: + mask = mask.numpy() + except AttributeError: + print("[mask_to_segs] Cannot operate: MASK is not a NumPy array or Tensor.") + return ([],) + + if mask is None: + print("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + result = [] + + if len(mask.shape) == 2: + mask = np.expand_dims(mask, axis=0) + + for i in range(mask.shape[0]): + mask_i = mask[i] + + if combined: + indices = np.nonzero(mask_i) + if len(indices[0]) > 0 and len(indices[1]) > 0: + bbox = ( + np.min(indices[1]), + np.min(indices[0]), + np.max(indices[1]), + np.max(indices[0]), + ) + crop_region = make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor + ) + x1, y1, x2, y2 = crop_region + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if x2 - x1 > 0 and y2 - y1 > 0: + cropped_mask = mask_i[y1:y2, x1:x2] + + if bbox_fill: + bx1, by1, bx2, by2 = bbox + cropped_mask = cropped_mask.copy() + cropped_mask[by1:by2, bx1:bx2] = 1.0 + + if cropped_mask is not None: + item = SEG(None, cropped_mask, 1.0, crop_region, bbox, label, None) + result.append(item) + + else: + mask_i_uint8 = (mask_i * 255.0).astype(np.uint8) + contours, ctree = cv2.findContours(mask_i_uint8, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + for j, contour in enumerate(contours): + hierarchy = ctree[0][j] + if hierarchy[3] != -1: + continue + + separated_mask = np.zeros_like(mask_i_uint8) + cv2.drawContours(separated_mask, [contour], 0, 255, -1) + separated_mask = np.array(separated_mask / 255.0).astype(np.float32) + + x, y, w, h = cv2.boundingRect(contour) + bbox = x, y, x + w, y + h + crop_region = make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor, crop_min_size + ) + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if w > 
drop_size and h > drop_size: + if is_contour: + mask_src = separated_mask + else: + mask_src = mask_i * separated_mask + + cropped_mask = np.array( + mask_src[ + crop_region[1]: crop_region[3], + crop_region[0]: crop_region[2], + ] + ) + + if bbox_fill: + cx1, cy1, _, _ = crop_region + bx1 = x - cx1 + bx2 = x+w - cx1 + by1 = y - cy1 + by2 = y+h - cy1 + cropped_mask[by1:by2, bx1:bx2] = 1.0 + + if cropped_mask is not None: + cropped_mask = torch.clip(torch.from_numpy(cropped_mask), 0, 1.0) + item = SEG(None, cropped_mask.numpy(), 1.0, crop_region, bbox, label, None) + result.append(item) + + if not result: + print(f"[mask_to_segs] Empty mask.") + + print(f"# of Detected SEGS: {len(result)}") + # for r in result: + # print(f"\tbbox={r.bbox}, crop={r.crop_region}, label={r.label}") + + # shape: (b,h,w) -> (h,w) + return (mask.shape[1], mask.shape[2]), result + + +def mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil): + parts = { + "face": np.array([0x0A, 0xC8, 0x0A]), + "mouth": np.array([0x0A, 0xB4, 0x0A]), + "left_eyebrow": np.array([0xB4, 0xDC, 0x0A]), + "left_eye": np.array([0xB4, 0xC8, 0x0A]), + "left_pupil": np.array([0xFA, 0xC8, 0x0A]), + "right_eyebrow": np.array([0x0A, 0xDC, 0xB4]), + "right_eye": np.array([0x0A, 0xC8, 0xB4]), + "right_pupil": np.array([0x0A, 0xC8, 0xFA]), + } + + def create_segments(image, color): + image = (image * 255).to(torch.uint8) + image = image.squeeze(0).numpy() + mask = cv2.inRange(image, color, color) + + contours, ctree = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + mask_list = [] + for i, contour in enumerate(contours): + hierarchy = ctree[0][i] + if hierarchy[3] == -1: + convex_hull = cv2.convexHull(contour) + convex_segment = np.zeros_like(image) + cv2.fillPoly(convex_segment, [convex_hull], (255, 255, 255)) + + convex_segment = np.expand_dims(convex_segment, axis=0).astype(np.float32) / 255.0 + tensor = torch.from_numpy(convex_segment) + mask_tensor = torch.any(tensor != 0, dim=-1).float() + mask_tensor = mask_tensor.squeeze(0) + mask_tensor = torch.from_numpy(dilate_mask(mask_tensor.numpy(), dilation)) + mask_list.append(mask_tensor.unsqueeze(0)) + + return mask_list + + segs = [] + + def create_seg(label): + mask_list = create_segments(image, parts[label]) + for mask in mask_list: + seg = mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size=drop_size, label=label, crop_min_size=crop_min_size) + if len(seg[1]) > 0: + segs.extend(seg[1]) + + if face: + create_seg('face') + + if mouth: + create_seg('mouth') + + if left_eyebrow: + create_seg('left_eyebrow') + + if left_eye: + create_seg('left_eye') + + if left_pupil: + create_seg('left_pupil') + + if right_eyebrow: + create_seg('right_eyebrow') + + if right_eye: + create_seg('right_eye') + + if right_pupil: + create_seg('right_pupil') + + return (image.shape[1], image.shape[2]), segs + + +def segs_to_combined_mask(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + mask = np.zeros((h, w), dtype=np.uint8) + + for seg in segs[1]: + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + + return torch.from_numpy(mask.astype(np.float32) / 255.0) + + +def segs_to_masklist(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + masks = [] + for seg in segs[1]: + if isinstance(seg.cropped_mask, np.ndarray): + cropped_mask = 
torch.from_numpy(seg.cropped_mask) + else: + cropped_mask = seg.cropped_mask + + if cropped_mask.ndim == 2: + cropped_mask = cropped_mask.unsqueeze(0) + + n = len(cropped_mask) + + mask = torch.zeros((n, h, w), dtype=torch.uint8) + crop_region = seg.crop_region + mask[:, crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).to(torch.uint8) + mask = (mask / 255.0).to(torch.float32) + + for x in mask: + masks.append(x) + + if len(masks) == 0: + empty_mask = torch.zeros((h, w), dtype=torch.float32, device="cpu") + masks = [empty_mask] + + return masks + + +def vae_decode(vae, samples, use_tile, hook, tile_size=512): + if use_tile: + pixels = nodes.VAEDecodeTiled().decode(vae, samples, tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, samples)[0] + + if hook is not None: + pixels = hook.post_decode(pixels) + + return pixels + + +def vae_encode(vae, pixels, use_tile, hook, tile_size=512): + if use_tile: + samples = nodes.VAEEncodeTiled().encode(vae, pixels, tile_size)[0] + else: + samples = nodes.VAEEncode().encode(vae, pixels)[0] + + if hook is not None: + samples = hook.post_encode(samples) + + return samples + + +def latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + + +def latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), old_pixels) + + +def latent_upscale_on_pixel_space(samples, scale_method, scale_factor, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + + +def latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] * scale_factor + h = pixels.shape[1] * scale_factor + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), old_pixels) + + +def latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, upscale_model, new_w, new_h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, upscale_model, new_w, new_h, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + + +def latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, upscale_model, new_w, new_h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, 
tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + + # upscale by model upscaler + current_w = w + while current_w < new_w: + pixels = model_upscale.ImageUpscaleWithModel().upscale(upscale_model, pixels)[0] + current_w = pixels.shape[2] + if current_w == w: + print(f"[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), old_pixels) + + +def latent_upscale_on_pixel_space_with_model(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + +def latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + h = pixels.shape[1] + + new_w = w * scale_factor + new_h = h * scale_factor + + # upscale by model upscaler + current_w = w + while current_w < new_w: + pixels = model_upscale.ImageUpscaleWithModel().upscale(upscale_model, pixels)[0] + current_w = pixels.shape[2] + if current_w == w: + print(f"[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), old_pixels) + + +class TwoSamplersForMaskUpscaler: + def __init__(self, scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, hook_base_opt=None, hook_mask_opt=None, + hook_full_opt=None, + tile_size=512): + + mask = make_2d_mask(mask) + + mask = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) + + self.params = scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae + self.upscale_model = upscale_model_opt + self.full_sampler = full_sampler_opt + self.hook_base = hook_base_opt + self.hook_mask = hook_mask_opt + self.hook_full = hook_full_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + self.is_tiled = False + self.vae = vae + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae = self.params + + mask = make_2d_mask(mask) + + self.prepare_hook(step_info) + + # upscale latent + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space(samples, scale_method, upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_base, tile_size=self.tile_size) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model(samples, scale_method, self.upscale_model, + upscale_factor, vae, + 
use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_mask, tile_size=self.tile_size) + + return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent) + + def prepare_hook(self, step_info): + if self.hook_base is not None: + self.hook_base.set_steps(step_info) + if self.hook_mask is not None: + self.hook_mask.set_steps(step_info) + if self.hook_full is not None: + self.hook_full.set_steps(step_info) + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae = self.params + + mask = make_2d_mask(mask) + + self.prepare_hook(step_info) + + # upscale latent + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_base, + tile_size=self.tile_size) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, self.upscale_model, + w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_mask, + tile_size=self.tile_size) + + return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent) + + def is_full_sample_time(self, step_info, sample_schedule): + cur_step, total_step = step_info + + # make steps start from 1 instead of zero + cur_step += 1 + total_step += 1 + + if sample_schedule == "none": + return False + + elif sample_schedule == "interleave1": + return cur_step % 2 == 0 + + elif sample_schedule == "interleave2": + return cur_step % 3 == 0 + + elif sample_schedule == "interleave3": + return cur_step % 4 == 0 + + elif sample_schedule == "last1": + return cur_step == total_step + + elif sample_schedule == "last2": + return cur_step >= total_step - 1 + + elif sample_schedule == "interleave1+last1": + return cur_step % 2 == 0 or cur_step >= total_step - 1 + + elif sample_schedule == "interleave2+last1": + # match the interleave2 period + return cur_step % 3 == 0 or cur_step >= total_step - 1 + + elif sample_schedule == "interleave3+last1": + # match the interleave3 period + return cur_step % 4 == 0 or cur_step >= total_step - 1 + + # unknown schedule + return False + + def do_samples(self, step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent): + mask = make_2d_mask(mask) + + if self.is_full_sample_time(step_info, sample_schedule): + print(f"step_info={step_info} / full time") + + upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base) + sampler = self.full_sampler if self.full_sampler is not None else base_sampler + return sampler.sample(upscaled_latent, self.hook_full) + + else: + print(f"step_info={step_info} / non-full time") + # upscale mask to (1, 1, H, W) so F.interpolate treats it as NCHW + if mask.ndim == 2: + mask = mask[None, None, :, :] + upscaled_mask = F.interpolate(mask, size=(upscaled_latent['samples'].shape[2], upscaled_latent['samples'].shape[3]), mode='bilinear', align_corners=True) + upscaled_mask = upscaled_mask[:, :, :upscaled_latent['samples'].shape[2], :upscaled_latent['samples'].shape[3]] + + # base sampler + upscaled_inv_mask = torch.where(upscaled_mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + upscaled_latent['noise_mask'] = upscaled_inv_mask + upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base) + + # mask sampler + upscaled_latent['noise_mask'] = upscaled_mask + upscaled_latent = mask_sampler.sample(upscaled_latent, self.hook_mask) + + # remove mask + del upscaled_latent['noise_mask'] + return upscaled_latent + + +class
PixelKSampleUpscaler: + def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, hook_opt=None, tile_size=512, scheduler_func=None, + tile_cnet_opt=None, tile_cnet_strength=1.0): + self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.upscale_model = upscale_model_opt + self.hook = hook_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + self.is_tiled = False + self.vae = vae + self.scheduler_func = scheduler_func + self.tile_cnet = tile_cnet_opt + self.tile_cnet_strength = tile_cnet_strength + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, images): + if self.tile_cnet is not None: + # IMAGE tensors are (batch, height, width, channel) + image_batch, image_h, image_w, _ = images.shape + if image_batch > 1: + warnings.warn('Multiple images in batch, Tile ControlNet being ignored') + else: + if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise RuntimeError("'TilePreprocessor' node (from comfyui_controlnet_aux) isn't installed.") + preprocessor = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']() + # might add capacity to set pyrUp_iters later, not needed for now though + preprocessed = preprocessor.execute(images, pyrUp_iters=3, resolution=min(image_w, image_h))[0] + apply_cnet = getattr(nodes.ControlNetApply(), nodes.ControlNetApply.FUNCTION) + positive = apply_cnet(positive, self.tile_cnet, preprocessed, strength=self.tile_cnet_strength)[0] + + refined_latent = impact_sampling.impact_sample(model, seed, steps, cfg, sampler_name, scheduler, + positive, negative, upscaled_latent, denoise, scheduler_func=self.scheduler_func) + + return refined_latent + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space2(samples, scale_method, upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook, tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model2(samples, scale_method, self.upscale_model, + upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + refined_latent = self.sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, upscaled_images) + return refined_latent + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook, + tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \
latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, self.upscale_model, + w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + refined_latent = self.sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, upscaled_images) + return refined_latent + + +class IPAdapterWrapper: + def __init__(self, ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, weight_v2, reference_image, neg_image=None, prev_control_net=None, combine_embeds='concat'): + self.reference_image = reference_image + self.ipadapter_pipe = ipadapter_pipe + self.weight = weight + self.weight_type = weight_type + self.noise = noise + self.start_at = start_at + self.end_at = end_at + self.unfold_batch = unfold_batch + self.prev_control_net = prev_control_net + self.weight_v2 = weight_v2 + self.image = reference_image + self.neg_image = neg_image + self.combine_embeds = combine_embeds + + # name 'apply_ipadapter' isn't allowed + def doit_ipadapter(self, model): + cnet_image_list = [self.image] + prev_cnet_images = [] + + if 'IPAdapterAdvanced' not in nodes.NODE_CLASS_MAPPINGS: + if 'IPAdapterApply' in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] 'ComfyUI IPAdapter Plus' is outdated.") + + utils.try_install_custom_node('https://github.com/cubiq/ComfyUI_IPAdapter_plus', + "To use 'IPAdapterApplySEGS' node, 'ComfyUI IPAdapter Plus' extension is required.") + raise Exception(f"[ERROR] To use IPAdapterApplySEGS, you need to install 'ComfyUI IPAdapter Plus'") + + obj = nodes.NODE_CLASS_MAPPINGS['IPAdapterAdvanced'] + + ipadapter, _, clip_vision, insightface, lora_loader = self.ipadapter_pipe + model = lora_loader(model) + + if self.prev_control_net is not None: + model, prev_cnet_images = self.prev_control_net.doit_ipadapter(model) + + model = obj().apply_ipadapter(model=model, ipadapter=ipadapter, weight=self.weight, weight_type=self.weight_type, + start_at=self.start_at, end_at=self.end_at, combine_embeds=self.combine_embeds, + clip_vision=clip_vision, image=self.image, image_negative=self.neg_image, attn_mask=None, + insightface=insightface, weight_faceidv2=self.weight_v2)[0] + + cnet_image_list.extend(prev_cnet_images) + + return model, cnet_image_list + + def apply(self, positive, negative, image, mask=None, use_acn=False): + if self.prev_control_net is not None: + return self.prev_control_net.apply(positive, negative, image, mask, use_acn=use_acn) + else: + return positive, negative, [] + + +class ControlNetWrapper: + def __init__(self, control_net, strength, preprocessor, prev_control_net=None, original_size=None, crop_region=None, control_image=None): + self.control_net = control_net + self.strength = strength + self.preprocessor = preprocessor + self.prev_control_net = prev_control_net + + if original_size is not None and crop_region is not None and control_image is not None: + self.control_image = utils.tensor_resize(control_image, original_size[1], original_size[0]) + self.control_image = torch.tensor(utils.tensor_crop(self.control_image, crop_region)) + else: + self.control_image = None + + def apply(self, positive, negative, image, mask=None, use_acn=False): + cnet_image_list = [] + prev_cnet_images = [] 
+ + if self.prev_control_net is not None: + positive, negative, prev_cnet_images = self.prev_control_net.apply(positive, negative, image, mask, use_acn=use_acn) + + if self.control_image is not None: + cnet_image = self.control_image + elif self.preprocessor is not None: + cnet_image = self.preprocessor.apply(image, mask) + else: + cnet_image = image + + cnet_image_list.extend(prev_cnet_images) + cnet_image_list.append(cnet_image) + + if use_acn: + if "ACN_AdvancedControlNetApply" in nodes.NODE_CLASS_MAPPINGS: + acn = nodes.NODE_CLASS_MAPPINGS['ACN_AdvancedControlNetApply']() + positive, negative, _ = acn.apply_controlnet(positive=positive, negative=negative, control_net=self.control_net, image=cnet_image, + strength=self.strength, start_percent=0.0, end_percent=1.0) + else: + utils.try_install_custom_node('https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet', + "To use 'ControlNetWrapper' for AnimateDiff, 'ComfyUI-Advanced-ControlNet' extension is required.") + raise Exception("'ACN_AdvancedControlNetApply' node isn't installed.") + else: + positive = nodes.ControlNetApply().apply_controlnet(positive, self.control_net, cnet_image, self.strength)[0] + + return positive, negative, cnet_image_list + + def doit_ipadapter(self, model): + if self.prev_control_net is not None: + return self.prev_control_net.doit_ipadapter(model) + else: + return model, [] + + +class ControlNetAdvancedWrapper: + def __init__(self, control_net, strength, start_percent, end_percent, preprocessor, prev_control_net=None, + original_size=None, crop_region=None, control_image=None): + self.control_net = control_net + self.strength = strength + self.preprocessor = preprocessor + self.prev_control_net = prev_control_net + self.start_percent = start_percent + self.end_percent = end_percent + + if original_size is not None and crop_region is not None and control_image is not None: + self.control_image = utils.tensor_resize(control_image, original_size[1], original_size[0]) + self.control_image = torch.tensor(utils.tensor_crop(self.control_image, crop_region)) + else: + self.control_image = None + + def doit_ipadapter(self, model): + if self.prev_control_net is not None: + return self.prev_control_net.doit_ipadapter(model) + else: + return model, [] + + def apply(self, positive, negative, image, mask=None, use_acn=False): + cnet_image_list = [] + prev_cnet_images = [] + + if self.prev_control_net is not None: + positive, negative, prev_cnet_images = self.prev_control_net.apply(positive, negative, image, mask, use_acn=use_acn) + + if self.control_image is not None: + cnet_image = self.control_image + elif self.preprocessor is not None: + cnet_image = self.preprocessor.apply(image, mask) + else: + cnet_image = image + + cnet_image_list.extend(prev_cnet_images) + cnet_image_list.append(cnet_image) + + if use_acn: + if "ACN_AdvancedControlNetApply" in nodes.NODE_CLASS_MAPPINGS: + acn = nodes.NODE_CLASS_MAPPINGS['ACN_AdvancedControlNetApply']() + positive, negative, _ = acn.apply_controlnet(positive=positive, negative=negative, control_net=self.control_net, image=cnet_image, + strength=self.strength, start_percent=self.start_percent, end_percent=self.end_percent) + else: + utils.try_install_custom_node('https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet', + "To use 'ControlNetAdvancedWrapper' for AnimateDiff, 'ComfyUI-Advanced-ControlNet' extension is required.") + raise Exception("'ACN_AdvancedControlNetApply' node isn't installed.") + else: + positive, negative = nodes.ControlNetApplyAdvanced().apply_controlnet(positive, negative,
self.control_net, cnet_image, self.strength, self.start_percent, self.end_percent) + + return positive, negative, cnet_image_list + + +# REQUIREMENTS: BlenderNeko/ComfyUI_TiledKSampler +class TiledKSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy + + def sample(self, latent_image, hook=None): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler'] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'TiledKSamplerProvider', 'Tiled sampling for ComfyUI' extension is required.") + raise Exception("'BNK_TiledKSampler' node isn't installed.") + + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy = self.params + + if hook is not None: + # the hook may replace the latent as well, so receive it back into latent_image + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise = \ + hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise) + + return TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, + scheduler, positive, negative, latent_image, denoise)[0] + + +class PixelTiledKSampleUpscaler: + def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, + denoise, + tile_width, tile_height, tiling_strategy, + upscale_model_opt=None, hook_opt=None, tile_cnet_opt=None, tile_size=512, tile_cnet_strength=1.0): + self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.vae = vae + self.tile_params = tile_width, tile_height, tiling_strategy + self.upscale_model = upscale_model_opt + self.hook = hook_opt + self.tile_cnet = tile_cnet_opt + self.tile_size = tile_size + self.is_tiled = True + self.tile_cnet_strength = tile_cnet_strength + + def tiled_ksample(self, latent, images): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler'] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'PixelTiledKSampleUpscalerProvider', 'Tiled sampling for ComfyUI' extension is required.") + raise RuntimeError("'BNK_TiledKSampler' node isn't installed.") + + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + tile_width, tile_height, tiling_strategy = self.tile_params + + if self.tile_cnet is not None: + # IMAGE tensors are (batch, height, width, channel) + image_batch, image_h, image_w, _ = images.shape + if image_batch > 1: + warnings.warn('Multiple images in batch, Tile ControlNet being ignored') + else: + if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise RuntimeError("'TilePreprocessor' node (from comfyui_controlnet_aux) isn't installed.") + preprocessor = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']() + # might add capacity to set pyrUp_iters later, not needed for now though + preprocessed = preprocessor.execute(images, pyrUp_iters=3, resolution=min(image_w, image_h))[0] + apply_cnet = getattr(nodes.ControlNetApply(), nodes.ControlNetApply.FUNCTION) + positive = apply_cnet(positive, self.tile_cnet, preprocessed, strength=self.tile_cnet_strength)[0] + + return
TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, + scheduler, positive, negative, latent, denoise)[0] + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space2(samples, scale_method, upscale_factor, vae, + use_tile=True, save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model2(samples, scale_method, self.upscale_model, + upscale_factor, vae, use_tile=True, + save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + + refined_latent = self.tiled_ksample(upscaled_latent, upscaled_images) + + return refined_latent + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, + use_tile=True, save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, + self.upscale_model, w, h, vae, + use_tile=True, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + refined_latent = self.tiled_ksample(upscaled_latent, upscaled_images) + + return refined_latent + + +# REQUIREMENTS: biegert/ComfyUI-CLIPSeg +class BBoxDetectorBasedOnCLIPSeg: + prompt = None + blur = None + threshold = None + dilation_factor = None + aux = None + + def __init__(self, prompt, blur, threshold, dilation_factor): + self.prompt = prompt + self.blur = blur + self.threshold = threshold + self.dilation_factor = dilation_factor + + def detect(self, image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size=1, detailer_hook=None): + mask = self.detect_combined(image, bbox_threshold, bbox_dilation) + + mask = make_2d_mask(mask) + + segs = mask_to_segs(mask, False, bbox_crop_factor, True, drop_size, detailer_hook=detailer_hook) + + if detailer_hook is not None and hasattr(detailer_hook, "post_detection"): + segs = detailer_hook.post_detection(segs) + + return segs + + def detect_combined(self, image, bbox_threshold, bbox_dilation): + if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS: + CLIPSeg = nodes.NODE_CLASS_MAPPINGS['CLIPSeg'] + else: + utils.try_install_custom_node('https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py', + "To use 'CLIPSegDetectorProvider', 'CLIPSeg' extension is required.") + raise Exception("'CLIPSeg' node isn't installed.") + + if self.threshold is None: + threshold = bbox_threshold + else: + threshold = self.threshold + + if self.dilation_factor is None: + dilation_factor = bbox_dilation + else: + dilation_factor = self.dilation_factor + + prompt = self.aux if self.prompt == '' and self.aux is not None else self.prompt + + mask, _, _ = CLIPSeg().segment_image(image, prompt, self.blur, threshold, dilation_factor) + mask = to_binary_mask(mask) + return mask + + def setAux(self, x): + self.aux = x + + 
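+ +# Usage sketch (illustrative only, not part of the node graph): 'input_image' is an assumed +# ComfyUI IMAGE tensor and the threshold values simply mirror common node defaults. +# detector = BBoxDetectorBasedOnCLIPSeg(prompt='face', blur=7, threshold=None, dilation_factor=None) +# segs = detector.detect(input_image, bbox_threshold=0.5, bbox_dilation=4, bbox_crop_factor=3.0, drop_size=10) +# combined_mask = detector.detect_combined(input_image, bbox_threshold=0.5, bbox_dilation=4)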
+def update_node_status(node, text, progress=None): + if PromptServer.instance.client_id is None: + return + + PromptServer.instance.send_sync("impact/update_status", { + "node": node, + "progress": progress, + "text": text + }, PromptServer.instance.client_id) + + +def random_mask_raw(mask, bbox, factor): + x1, y1, x2, y2 = bbox + w = x2 - x1 + h = y2 - y1 + + factor = max(6, int(min(w, h) * factor / 4)) + + def draw_random_circle(center, radius): + i, j = center + for x in range(int(i - radius), int(i + radius)): + for y in range(int(j - radius), int(j + radius)): + if np.linalg.norm(np.array([x, y]) - np.array([i, j])) <= radius: + mask[x, y] = 1 + + def draw_irregular_line(start, end, pivot, is_vertical): + i = start + while i < end: + base_radius = np.random.randint(5, factor) + radius = int(base_radius) + + if is_vertical: + draw_random_circle((i, pivot), radius) + else: + draw_random_circle((pivot, i), radius) + + i += radius + + def draw_irregular_line_parallel(start, end, pivot, is_vertical): + with ThreadPoolExecutor(max_workers=16) as executor: + futures = [] + # guard against a zero step when the span is narrower than 16 pixels + step = max(1, (end - start) // 16) + for i in range(start, end, step): + future = executor.submit(draw_irregular_line, i, min(i + step, end), pivot, is_vertical) + futures.append(future) + + for future in futures: + future.result() + + draw_irregular_line_parallel(y1 + factor, y2 - factor, x1 + factor, True) + draw_irregular_line_parallel(y1 + factor, y2 - factor, x2 - factor, True) + draw_irregular_line_parallel(x1 + factor, x2 - factor, y1 + factor, False) + draw_irregular_line_parallel(x1 + factor, x2 - factor, y2 - factor, False) + + mask[y1 + factor:y2 - factor, x1 + factor:x2 - factor] = 1.0 + + +def random_mask(mask, bbox, factor, size=128): + small_mask = np.zeros((size, size)).astype(np.float32) + random_mask_raw(small_mask, (0, 0, size, size), factor) + + x1, y1, x2, y2 = bbox + small_mask = torch.tensor(small_mask).unsqueeze(0).unsqueeze(0) + bbox_mask = torch.nn.functional.interpolate(small_mask, size=(y2 - y1, x2 - x1), mode='bilinear', align_corners=False) + bbox_mask = bbox_mask.squeeze(0).squeeze(0) + mask[y1:y2, x1:x2] = bbox_mask + + +def adaptive_mask_paste(dest_mask, src_mask, bbox): + x1, y1, x2, y2 = bbox + small_mask = torch.tensor(src_mask).unsqueeze(0).unsqueeze(0) + bbox_mask = torch.nn.functional.interpolate(small_mask, size=(y2 - y1, x2 - x1), mode='bilinear', align_corners=False) + bbox_mask = bbox_mask.squeeze(0).squeeze(0) + dest_mask[y1:y2, x1:x2] = bbox_mask + + +def crop_condition_mask(mask, image, crop_region): + cond_scale = (mask.shape[1] / image.shape[1], mask.shape[2] / image.shape[2]) + mask_region = [round(v * cond_scale[i % 2]) for i, v in enumerate(crop_region)] + return crop_ndarray3(mask, mask_region) + + +class SafeToGPU: + def __init__(self, size): + self.size = size + + def to_device(self, obj, device): + if utils.is_same_device(device, 'cpu'): + obj.to(device) + else: + if utils.is_same_device(obj.device, 'cpu'): # cpu to gpu + model_management.free_memory(self.size * 1.3, device) + if model_management.get_free_memory(device) > self.size * 1.3: + try: + obj.to(device) + except Exception: + print(f"WARN: The model is not moved to the '{device}' due to insufficient memory. [1]") + else: + print(f"WARN: The model is not moved to the '{device}' due to insufficient memory.
[2]") + + +from comfy.cli_args import args, LatentPreviewMethod +import folder_paths +from latent_preview import TAESD, TAESDPreviewerImpl, Latent2RGBPreviewer + +try: + import comfy.latent_formats as latent_formats + + + def get_previewer(device, latent_format=latent_formats.SD15(), force=False, method=None): + previewer = None + + if method is None: + method = args.preview_method + + if method != LatentPreviewMethod.NoPreviews or force: + # TODO previewer methods + taesd_decoder_path = None + + if hasattr(latent_format, "taesd_decoder_path"): + taesd_decoder_path = folder_paths.get_full_path("vae_approx", latent_format.taesd_decoder_name) + + if method == LatentPreviewMethod.Auto: + method = LatentPreviewMethod.Latent2RGB + if taesd_decoder_path: + method = LatentPreviewMethod.TAESD + + if method == LatentPreviewMethod.TAESD: + if taesd_decoder_path: + taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) + previewer = TAESDPreviewerImpl(taesd) + else: + print("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format( + latent_format.taesd_decoder_name)) + + if previewer is None: + previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors) + return previewer + +except: + print(f"#########################################################################") + print(f"[ERROR] ComfyUI-Impact-Pack: Please update ComfyUI to the latest version.") + print(f"#########################################################################") diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/defs.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/defs.py new file mode 100644 index 0000000000000000000000000000000000000000..761a4eb107dfa1562ee7a201a6a611fabdd97d7c --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/defs.py @@ -0,0 +1,17 @@ +detection_labels = [ + 'hand', 'face', 'mouth', 'eyes', 'eyebrows', 'pupils', + 'left_eyebrow', 'left_eye', 'left_pupil', 'right_eyebrow', 'right_eye', 'right_pupil', + 'short_sleeved_shirt', 'long_sleeved_shirt', 'short_sleeved_outwear', 'long_sleeved_outwear', + 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress', 'sling_dress', + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", + "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", + "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", + "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", + "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", + "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", + "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", + "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + ] \ No newline at end of file diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py new file mode 100644 index 0000000000000000000000000000000000000000..736052f8cf0a6effb8528474d965882013339625 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py @@ -0,0 +1,455 @@ 
+import impact.core as core +from nodes import MAX_RESOLUTION +import impact.segs_nodes as segs_nodes +import impact.utils as utils +import torch +from impact.core import SEG + + +class SAMDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", ), + "segs": ("SEGS", ), + "image": ("IMAGE", ), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"],), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_hint_use_negative": (["False", "Small", "Outter"], ) + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + return (core.make_sam_mask(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative), ) + + +class SAMDetectorSegmented: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", ), + "segs": ("SEGS", ), + "image": ("IMAGE", ), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"],), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_hint_use_negative": (["False", "Small", "Outter"], ) + } + } + + RETURN_TYPES = ("MASK", "MASK") + RETURN_NAMES = ("combined_mask", "batch_masks") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + combined_mask, batch_masks = core.make_sam_mask_segmented(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, + mask_hint_use_negative) + return (combined_mask, batch_masks, ) + + +class BboxDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, bbox_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: BboxDetectorForEach does not allow image batches.\nPlease refer to 
https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + segs = bbox_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SegmDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + segs = segm_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation): + mask = segm_detector.detect_combined(image, threshold, dilation) + + if mask is None: + mask = torch.zeros((image.shape[2], image.shape[1]), dtype=torch.float32, device="cpu") + + return (mask.unsqueeze(0),) + + +class BboxDetectorCombined(SegmDetectorCombined): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 4, "min": -512, "max": 512, "step": 1}), + } + } + + def doit(self, bbox_detector, image, threshold, dilation): + mask = bbox_detector.detect_combined(image, threshold, dilation) + + if mask is None: + mask = torch.zeros((image.shape[2], image.shape[1]), dtype=torch.float32, device="cpu") + + return (mask.unsqueeze(0),) + + +class SimpleDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, 
"step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None, + detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + if segm_detector_opt is not None and hasattr(segm_detector_opt, 'bbox_detector') and segm_detector_opt.bbox_detector == bbox_detector: + # Better segm support for YOLO-World detector + segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + else: + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + segs = core.dilate_segs(segs, post_dilation) + + return (segs,) + + def doit(self, bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None): + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=post_dilation, + sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt) + + +class SimpleDetectorForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "detailer_pipe": ("DETAILER_PIPE", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + 
"sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, detailer_pipe, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, post_dilation=0): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=post_dilation, sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt, + detailer_hook=detailer_hook) + + +class SimpleDetectorForAnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image_frames": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "masking_mode": (["Pivot SEGS", "Combine neighboring frames", "Don't combine"],), + "segs_pivot": (["Combined mask", "1st frame mask"],), + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, + masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None): + + h = image_frames.shape[1] + w = image_frames.shape[2] + + # gather segs for all frames + segs_by_frames = [] + for image in image_frames: + image = image.unsqueeze(0) + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + segs_by_frames.append(segs) + + def get_masked_frames(): + 
masks_by_frame = []
+            for i, segs in enumerate(segs_by_frames):
+                masks_in_frame = segs_nodes.SEGSToMaskList().doit(segs)[0]
+                current_frame_mask = (masks_in_frame[0] * 255).to(torch.uint8)
+
+                for mask in masks_in_frame[1:]:
+                    current_frame_mask |= (mask * 255).to(torch.uint8)
+
+                current_frame_mask = (current_frame_mask/255.0).to(torch.float32)
+                current_frame_mask = utils.to_binary_mask(current_frame_mask, 0.1)[0]
+
+                masks_by_frame.append(current_frame_mask)
+
+            return masks_by_frame
+
+        def get_empty_mask():
+            return torch.zeros((h, w), dtype=torch.float32, device="cpu")
+
+        def get_neighboring_mask_at(i, masks_by_frame):
+            prv = masks_by_frame[i-1] if i > 0 else get_empty_mask()
+            cur = masks_by_frame[i]
+            nxt = masks_by_frame[i+1] if i < len(masks_by_frame) - 1 else get_empty_mask()
+
+            prv = prv if prv is not None else get_empty_mask()
+            cur = cur.clone() if cur is not None else get_empty_mask()
+            nxt = nxt if nxt is not None else get_empty_mask()
+
+            return prv, cur, nxt
+
+        def get_merged_neighboring_mask(masks_by_frame):
+            if len(masks_by_frame) <= 1:
+                return masks_by_frame
+
+            result = []
+            for i in range(0, len(masks_by_frame)):
+                prv, cur, nxt = get_neighboring_mask_at(i, masks_by_frame)
+                cur = (cur * 255).to(torch.uint8)
+                cur |= (prv * 255).to(torch.uint8)
+                cur |= (nxt * 255).to(torch.uint8)
+                cur = (cur / 255.0).to(torch.float32)
+                cur = utils.to_binary_mask(cur, 0.1)[0]
+                result.append(cur)
+
+            return result
+
+        def get_whole_merged_mask():
+            all_masks = []
+            for segs in segs_by_frames:
+                all_masks += segs_nodes.SEGSToMaskList().doit(segs)[0]
+
+            merged_mask = (all_masks[0] * 255).to(torch.uint8)
+            for mask in all_masks[1:]:
+                merged_mask |= (mask * 255).to(torch.uint8)
+
+            merged_mask = (merged_mask / 255.0).to(torch.float32)
+            merged_mask = utils.to_binary_mask(merged_mask, 0.1)[0]
+            return merged_mask
+
+        def get_pivot_segs():
+            if segs_pivot == "1st frame mask":
+                return segs_by_frames[0][1]
+            else:
+                merged_mask = get_whole_merged_mask()
+                return segs_nodes.MaskToSEGS.doit(merged_mask, False, crop_factor, False, drop_size, contour_fill=True)[0]
+
+        def get_segs(merged_neighboring=False):
+            pivot_segs = get_pivot_segs()
+
+            masks_by_frame = get_masked_frames()
+            if merged_neighboring:
+                masks_by_frame = get_merged_neighboring_mask(masks_by_frame)
+
+            new_segs = []
+            for seg in pivot_segs[1]:
+                cropped_mask = torch.zeros(seg.cropped_mask.shape, dtype=torch.float32, device="cpu").unsqueeze(0)
+                pivot_mask = torch.from_numpy(seg.cropped_mask)
+                x1, y1, x2, y2 = seg.crop_region
+                for mask in masks_by_frame:
+                    cropped_mask_at_frame = (mask[y1:y2, x1:x2] * pivot_mask).unsqueeze(0)
+                    cropped_mask = torch.cat((cropped_mask, cropped_mask_at_frame), dim=0)
+
+                if len(cropped_mask) > 1:
+                    cropped_mask = cropped_mask[1:]
+
+                new_seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+                new_segs.append(new_seg)
+
+            return pivot_segs[0], new_segs
+
+        # create result mask
+        if masking_mode == "Pivot SEGS":
+            return (get_pivot_segs(), )
+
+        elif masking_mode == "Combine neighboring frames":
+            return (get_segs(merged_neighboring=True), )
+
+        else:  # elif masking_mode == "Don't combine":
+            return (get_segs(merged_neighboring=False), )
+
+    def doit(self, bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size,
+             sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold,
+             masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None):
+
+        return 
SimpleDetectorForAnimateDiff.detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, + masking_mode, segs_pivot, sam_model_opt, segm_detector_opt) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..b93e040e55225e532faf5afa064b56b87e39e177 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py @@ -0,0 +1,188 @@ +import comfy +import re +from impact.utils import * + +hf_transformer_model_urls = [ + "rizvandwiki/gender-classification-2", + "NTQAI/pedestrian_gender_recognition", + "Leilab/gender_class", + "ProjectPersonal/GenderClassifier", + "crangana/trained-gender", + "cledoux42/GenderNew_v002", + "ivensamdh/genderage2" +] + + +class HF_TransformersClassifierProvider: + @classmethod + def INPUT_TYPES(s): + global hf_transformer_model_urls + return {"required": { + "preset_repo_id": (hf_transformer_model_urls + ['Manual repo id'],), + "manual_repo_id": ("STRING", {"multiline": False}), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + }, + } + + RETURN_TYPES = ("TRANSFORMERS_CLASSIFIER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + def doit(self, preset_repo_id, manual_repo_id, device_mode): + from transformers import pipeline + + if preset_repo_id == 'Manual repo id': + url = manual_repo_id + else: + url = preset_repo_id + + if device_mode != 'CPU': + device = comfy.model_management.get_torch_device() + else: + device = "cpu" + + classifier = pipeline('image-classification', model=url, device=device) + + return (classifier,) + + +preset_classify_expr = [ + '#Female > #Male', + '#Female < #Male', + 'female > 0.5', + 'male > 0.5', + 'Age16to25 > 0.1', + 'Age50to69 > 0.1', +] + +symbolic_label_map = { + '#Female': {'female', 'Female', 'Human Female', 'woman', 'women', 'girl'}, + '#Male': {'male', 'Male', 'Human Male', 'man', 'men', 'boy'} +} + +def is_numeric_string(input_str): + return re.match(r'^-?\d+(\.\d+)?$', input_str) is not None + + +classify_expr_pattern = r'([^><= ]+)\s*(>|<|>=|<=|=)\s*([^><= ]+)' + + +class SEGS_Classify: + @classmethod + def INPUT_TYPES(s): + global preset_classify_expr + return {"required": { + "classifier": ("TRANSFORMERS_CLASSIFIER",), + "segs": ("SEGS",), + "preset_expr": (preset_classify_expr + ['Manual expr'],), + "manual_expr": ("STRING", {"multiline": False}), + }, + "optional": { + "ref_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("SEGS", "SEGS", "STRING") + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS", "detected_labels") + OUTPUT_IS_LIST = (False, False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + @staticmethod + def lookup_classified_label_score(score_infos, label): + global symbolic_label_map + + if label.startswith('#'): + if label not in symbolic_label_map: + return None + else: + label = symbolic_label_map[label] + else: + label = {label} + + for x in score_infos: + if x['label'] in label: + return x['score'] + + return None + + def doit(self, classifier, segs, preset_expr, manual_expr, ref_image_opt=None): + if preset_expr == 'Manual expr': + expr_str = manual_expr + else: + expr_str = preset_expr + + match = re.match(classify_expr_pattern, expr_str) + + if match is None: + return (segs[0], []), segs, [] + + a = match.group(1) + op = match.group(2) + b = match.group(3) + + 
a_is_lab = not is_numeric_string(a) + b_is_lab = not is_numeric_string(b) + + classified = [] + remained_SEGS = [] + provided_labels = set() + + for seg in segs[1]: + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif ref_image_opt is not None: + # take from original image + cropped_image = crop_image(ref_image_opt, seg.crop_region) + + if cropped_image is not None: + cropped_image = to_pil(cropped_image) + res = classifier(cropped_image) + classified.append((seg, res)) + + for x in res: + provided_labels.add(x['label']) + else: + remained_SEGS.append(seg) + + filtered_SEGS = [] + for seg, res in classified: + if a_is_lab: + avalue = SEGS_Classify.lookup_classified_label_score(res, a) + else: + avalue = a + + if b_is_lab: + bvalue = SEGS_Classify.lookup_classified_label_score(res, b) + else: + bvalue = b + + if avalue is None or bvalue is None: + remained_SEGS.append(seg) + continue + + avalue = float(avalue) + bvalue = float(bvalue) + + if op == '>': + cond = avalue > bvalue + elif op == '<': + cond = avalue < bvalue + elif op == '>=': + cond = avalue >= bvalue + elif op == '<=': + cond = avalue <= bvalue + else: + cond = avalue == bvalue + + if cond: + filtered_SEGS.append(seg) + else: + remained_SEGS.append(seg) + + return (segs[0], filtered_SEGS), (segs[0], remained_SEGS), list(provided_labels) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..7be1989db963984166c4a47953512b6428ad9ea6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py @@ -0,0 +1,83 @@ +import sys +from . import hooks +from . 
import defs
+
+
+class SEGSOrderedFilterDetailerHookProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],),
+                        "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}),
+                        "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
+                        "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}),
+                     },
+                }
+
+    RETURN_TYPES = ("DETAILER_HOOK", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, target, order, take_start, take_count):
+        hook = hooks.SEGSOrderedFilterDetailerHook(target, order, take_start, take_count)
+        return (hook, )
+
+
+class SEGSRangeFilterDetailerHookProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent"],),
+                        "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}),
+                        "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
+                        "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}),
+                     },
+                }
+
+    RETURN_TYPES = ("DETAILER_HOOK", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, target, mode, min_value, max_value):
+        hook = hooks.SEGSRangeFilterDetailerHook(target, mode, min_value, max_value)
+        return (hook, )
+
+
+class SEGSLabelFilterDetailerHookProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "segs": ("SEGS", ),
+                        "preset": (['all'] + defs.detection_labels,),
+                        "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}),
+                     },
+                }
+
+    RETURN_TYPES = ("DETAILER_HOOK", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, segs, preset, labels):
+        hook = hooks.SEGSLabelFilterDetailerHook(labels)
+        return (hook, )
+
+
+class PreviewDetailerHookProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+                    "required": {"quality": ("INT", {"default": 95, "min": 20, "max": 100})},
+                    "hidden": {"unique_id": "UNIQUE_ID"},
+                }
+
+    RETURN_TYPES = ("DETAILER_HOOK", "UPSCALER_HOOK")
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, quality, unique_id):
+        hook = hooks.PreviewDetailerHook(unique_id, quality)
+        return (hook, hook)
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..393fae500fac9a245b1aec37ba2837d0e55efd1e
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py
@@ -0,0 +1,520 @@
+import copy
+import torch
+import nodes
+from impact import utils
+from . 
import segs_nodes
+from thirdparty import noise_nodes
+from server import PromptServer
+import asyncio
+import folder_paths
+import os
+from comfy_extras import nodes_custom_sampler
+import math
+
+
+class PixelKSampleHook:
+    cur_step = 0
+    total_step = 0
+
+    def __init__(self):
+        pass
+
+    def set_steps(self, info):
+        self.cur_step, self.total_step = info
+
+    def post_decode(self, pixels):
+        return pixels
+
+    def post_upscale(self, pixels):
+        return pixels
+
+    def post_encode(self, samples):
+        return samples
+
+    def pre_decode(self, samples):
+        return samples
+
+    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent,
+                    denoise):
+        return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise
+
+    def post_crop_region(self, w, h, item_bbox, crop_region):
+        return crop_region
+
+    def touch_scaled_size(self, w, h):
+        return w, h
+
+
+class PixelKSampleHookCombine(PixelKSampleHook):
+    hook1 = None
+    hook2 = None
+
+    def __init__(self, hook1, hook2):
+        super().__init__()
+        self.hook1 = hook1
+        self.hook2 = hook2
+
+    def set_steps(self, info):
+        self.hook1.set_steps(info)
+        self.hook2.set_steps(info)
+
+    def pre_decode(self, samples):
+        return self.hook2.pre_decode(self.hook1.pre_decode(samples))
+
+    def post_decode(self, pixels):
+        return self.hook2.post_decode(self.hook1.post_decode(pixels))
+
+    def post_upscale(self, pixels):
+        return self.hook2.post_upscale(self.hook1.post_upscale(pixels))
+
+    def post_encode(self, samples):
+        return self.hook2.post_encode(self.hook1.post_encode(samples))
+
+    def post_crop_region(self, w, h, item_bbox, crop_region):
+        crop_region = self.hook1.post_crop_region(w, h, item_bbox, crop_region)
+        return self.hook2.post_crop_region(w, h, item_bbox, crop_region)
+
+    def touch_scaled_size(self, w, h):
+        w, h = self.hook1.touch_scaled_size(w, h)
+        return self.hook2.touch_scaled_size(w, h)
+
+    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent,
+                    denoise):
+        model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \
+            self.hook1.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative,
+                                   upscaled_latent, denoise)
+
+        return self.hook2.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative,
+                                      upscaled_latent, denoise)
+
+
+class DetailerHookCombine(PixelKSampleHookCombine):
+    def cycle_latent(self, latent):
+        latent = self.hook1.cycle_latent(latent)
+        latent = self.hook2.cycle_latent(latent)
+        return latent
+
+    def post_detection(self, segs):
+        segs = self.hook1.post_detection(segs)
+        segs = self.hook2.post_detection(segs)
+        return segs
+
+    def post_paste(self, image):
+        image = self.hook1.post_paste(image)
+        image = self.hook2.post_paste(image)
+        return image
+
+    def get_custom_noise(self, seed, noise, is_touched):
+        noise, is_touched = self.hook1.get_custom_noise(seed, noise, is_touched)
+        noise, is_touched = self.hook2.get_custom_noise(seed, noise, is_touched)
+        return noise, is_touched
+
+
+class SimpleCfgScheduleHook(PixelKSampleHook):
+    target_cfg = 0
+
+    def __init__(self, target_cfg):
+        super().__init__()
+        self.target_cfg = target_cfg
+
+    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
+        if self.total_step > 1:
+            progress = self.cur_step / (self.total_step - 1)
+            gap = self.target_cfg - cfg
+            current_cfg = int(cfg + gap * progress)
+        else:
+            
current_cfg = self.target_cfg + + return model, seed, steps, current_cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + +class SimpleDenoiseScheduleHook(PixelKSampleHook): + def __init__(self, target_denoise): + super().__init__() + self.target_denoise = target_denoise + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_denoise - denoise + current_denoise = denoise + gap * progress + else: + current_denoise = self.target_denoise + + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, current_denoise + + +class SimpleStepsScheduleHook(PixelKSampleHook): + def __init__(self, target_steps): + super().__init__() + self.target_steps = target_steps + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_steps - steps + current_steps = int(steps + gap * progress) + else: + current_steps = self.target_steps + + return model, seed, current_steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + +class DetailerHook(PixelKSampleHook): + def cycle_latent(self, latent): + return latent + + def post_detection(self, segs): + return segs + + def post_paste(self, image): + return image + + def get_custom_noise(self, seed, noise, is_touched): + return noise, is_touched + + +# class CustomNoiseDetailerHookProvider(DetailerHook): +# def __init__(self, noise): +# super().__init__() +# self.noise = noise +# +# def get_custom_noise(self, seed, noise, is_start): +# return self.noise + + +class VariationNoiseDetailerHookProvider(DetailerHook): + def __init__(self, variation_seed, variation_strength): + super().__init__() + self.variation_seed = variation_seed + self.variation_strength = variation_strength + + def get_custom_noise(self, seed, noise, is_touched): + empty_noise = {'samples': torch.zeros(noise.size())} + if not is_touched: + noise = nodes_custom_sampler.Noise_RandomNoise(seed).generate_noise(empty_noise) + noise_2nd = nodes_custom_sampler.Noise_RandomNoise(self.variation_seed).generate_noise(empty_noise) + + mixed_noise = ((1 - self.variation_strength) * noise + self.variation_strength * noise_2nd) + + # NOTE: Since the variance of the Gaussian noise in mixed_noise has changed, it must be corrected through scaling. 
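+        # For independent unit-variance noises n1 and n2, (1 - s) * n1 + s * n2 has
+        # variance (1 - s)^2 + s^2, so dividing by sqrt of that restores unit variance.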
+ scale_factor = math.sqrt((1 - self.variation_strength) ** 2 + self.variation_strength ** 2) + corrected_noise = mixed_noise / scale_factor # Scale the noise to maintain variance of 1 + + return corrected_noise, True + + +class SimpleDetailerDenoiseSchedulerHook(DetailerHook): + def __init__(self, target_denoise): + super().__init__() + self.target_denoise = target_denoise + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_denoise - denoise + current_denoise = denoise + gap * progress + else: + # ignore hook if total cycle <= 1 + current_denoise = denoise + + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, current_denoise + + +class CoreMLHook(DetailerHook): + def __init__(self, mode): + super().__init__() + resolution = mode.split('x') + + self.w = int(resolution[0]) + self.h = int(resolution[1]) + + self.override_bbox_by_segm = False + + def pre_decode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'][0].unsqueeze(0) + return new_samples + + def post_encode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'].repeat(2, 1, 1, 1) + return new_samples + + def post_crop_region(self, w, h, item_bbox, crop_region): + x1, y1, x2, y2 = crop_region + bx1, by1, bx2, by2 = item_bbox + crop_w = x2-x1 + crop_h = y2-y1 + + crop_ratio = crop_w/crop_h + target_ratio = self.w/self.h + if crop_ratio < target_ratio: + # shrink height + top_gap = by1 - y1 + bottom_gap = y2 - by2 + + gap_ratio = top_gap / bottom_gap + + target_height = 1/target_ratio*crop_w + delta_height = crop_h - target_height + + new_y1 = int(y1 + delta_height*gap_ratio) + new_y2 = int(new_y1 + target_height) + crop_region = x1, new_y1, x2, new_y2 + + elif crop_ratio > target_ratio: + # shrink width + left_gap = bx1 - x1 + right_gap = x2 - bx2 + + gap_ratio = left_gap / right_gap + + target_width = target_ratio*crop_h + delta_width = crop_w - target_width + + new_x1 = int(x1 + delta_width*gap_ratio) + new_x2 = int(new_x1 + target_width) + crop_region = new_x1, y1, new_x2, y2 + + return crop_region + + def touch_scaled_size(self, w, h): + return self.w, self.h + + +# REQUIREMENTS: BlenderNeko/ComfyUI Noise +class InjectNoiseHook(PixelKSampleHook): + def __init__(self, source, seed, start_strength, end_strength): + super().__init__() + self.source = source + self.seed = seed + self.start_strength = start_strength + self.end_strength = end_strength + + def post_encode(self, samples): + cur_step = self.cur_step + + size = samples['samples'].shape + seed = cur_step + self.seed + cur_step + + if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS: + NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"] + InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise', + "To use 'NoiseInjectionHookProvider', 'ComfyUI Noise' extension is required.") + raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.") + + noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0] + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + strength = self.start_strength + (self.end_strength - self.start_strength) 
* cur_step / self.total_step + samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0] + print(f"[Impact Pack] InjectNoiseHook: strength = {strength}") + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + +class UnsamplerHook(PixelKSampleHook): + def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative): + super().__init__() + self.model = model + self.cfg = cfg + self.sampler_name = sampler_name + self.steps = steps + self.start_end_at_step = start_end_at_step + self.end_end_at_step = end_end_at_step + self.scheduler = scheduler + self.normalize = normalize + self.positive = positive + self.negative = negative + + def post_encode(self, samples): + cur_step = self.cur_step + + Unsampler = noise_nodes.Unsampler + + end_at_step = self.start_end_at_step + (self.end_end_at_step - self.start_end_at_step) * cur_step / self.total_step + end_at_step = int(end_at_step) + + print(f"[Impact Pack] UnsamplerHook: end_at_step = {end_at_step}") + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step, + self.scheduler, self.normalize, self.positive, self.negative, samples)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + +class InjectNoiseHookForDetailer(DetailerHook): + def __init__(self, source, seed, start_strength, end_strength, from_start=False): + super().__init__() + self.source = source + self.seed = seed + self.start_strength = start_strength + self.end_strength = end_strength + self.from_start = from_start + + def inject_noise(self, samples): + cur_step = self.cur_step if self.from_start else self.cur_step - 1 + total_step = self.total_step if self.from_start else self.total_step - 1 + + size = samples['samples'].shape + seed = cur_step + self.seed + cur_step + + if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS: + NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"] + InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise', + "To use 'NoiseInjectionDetailerHookProvider', 'ComfyUI Noise' extension is required.") + raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.") + + noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0] + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + strength = self.start_strength + (self.end_strength - self.start_strength) * cur_step / total_step + samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + def cycle_latent(self, latent): + if self.cur_step == 0 and not self.from_start: + return latent + else: + return self.inject_noise(latent) + + +class UnsamplerDetailerHook(DetailerHook): + def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, from_start=False): + super().__init__() + self.model = model + self.cfg = cfg + self.sampler_name = sampler_name + self.steps = steps + self.start_end_at_step = start_end_at_step + self.end_end_at_step = end_end_at_step + self.scheduler = scheduler + self.normalize = normalize + self.positive = 
positive + self.negative = negative + self.from_start = from_start + + def unsample(self, samples): + cur_step = self.cur_step if self.from_start else self.cur_step - 1 + total_step = self.total_step if self.from_start else self.total_step - 1 + + Unsampler = noise_nodes.Unsampler + + end_at_step = self.start_end_at_step + (self.end_end_at_step - self.start_end_at_step) * cur_step / total_step + end_at_step = int(end_at_step) + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step, + self.scheduler, self.normalize, self.positive, self.negative, samples)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + def cycle_latent(self, latent): + if self.cur_step == 0 and not self.from_start: + return latent + else: + return self.unsample(latent) + + +class SEGSOrderedFilterDetailerHook(DetailerHook): + def __init__(self, target, order, take_start, take_count): + super().__init__() + self.target = target + self.order = order + self.take_start = take_start + self.take_count = take_count + + def post_detection(self, segs): + return segs_nodes.SEGSOrderedFilter().doit(segs, self.target, self.order, self.take_start, self.take_count)[0] + + +class SEGSRangeFilterDetailerHook(DetailerHook): + def __init__(self, target, mode, min_value, max_value): + super().__init__() + self.target = target + self.mode = mode + self.min_value = min_value + self.max_value = max_value + + def post_detection(self, segs): + return segs_nodes.SEGSRangeFilter().doit(segs, self.target, self.mode, self.min_value, self.max_value)[0] + + +class SEGSLabelFilterDetailerHook(DetailerHook): + def __init__(self, labels): + super().__init__() + self.labels = labels + + def post_detection(self, segs): + return segs_nodes.SEGSLabelFilter().doit(segs, "", self.labels)[0] + + +class PreviewDetailerHook(DetailerHook): + def __init__(self, node_id, quality): + super().__init__() + self.node_id = node_id + self.quality = quality + + async def send(self, image): + if len(image) > 0: + image = image[0].unsqueeze(0) + img = utils.tensor2pil(image) + + temp_path = os.path.join(folder_paths.get_temp_directory(), 'pvhook') + + if not os.path.exists(temp_path): + os.makedirs(temp_path) + + fullpath = os.path.join(temp_path, f"{self.node_id}.webp") + img.save(fullpath, quality=self.quality) + + item = { + "filename": f"{self.node_id}.webp", + "subfolder": 'pvhook', + "type": 'temp' + } + + PromptServer.instance.send_sync("impact-preview", {'node_id': self.node_id, 'item': item}) + + def post_paste(self, image): + asyncio.run(self.send(image)) + return image diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py new file mode 100644 index 0000000000000000000000000000000000000000..f620ef73017a0c787577b6dd171773c26317d309 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py @@ -0,0 +1,2172 @@ +import os +import sys + +import comfy.samplers +import comfy.sd +import warnings +from segment_anything import sam_model_registry +from io import BytesIO +import piexif +import zipfile +import re + +import impact.wildcards + +from impact.utils import * +import impact.core as core +from impact.core import SEG +from impact.config import latent_letter_path +from nodes import MAX_RESOLUTION +from PIL import Image, ImageOps +import numpy as np +import hashlib +import 
json +import safetensors.torch +from PIL.PngImagePlugin import PngInfo +import comfy.model_management +import base64 +import impact.wildcards as wildcards +from . import hooks + +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') + +model_path = folder_paths.models_dir + + +# folder_paths.supported_pt_extensions +add_folder_path_and_extensions("mmdets_bbox", [os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("mmdets_segm", [os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("mmdets", [os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'}) + + +# Nodes +class ONNXDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}} + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "load_onnx" + + CATEGORY = "ImpactPack" + + def load_onnx(self, model_name): + model = folder_paths.get_full_path("onnx", model_name) + return (core.ONNXDetector(model), ) + + +class CLIPSegDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": False}), + "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7}), + "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4}), + "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4}), + } + } + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, text, blur, threshold, dilation_factor): + if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS: + return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), ) + else: + print("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. 
You must install biegert/ComfyUI-CLIPSeg extension to use this node.") + + +class SAMLoader: + @classmethod + def INPUT_TYPES(cls): + models = [x for x in folder_paths.get_filename_list("sams") if 'hq' not in x] + return { + "required": { + "model_name": (models + ['ESAM'], ), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + } + } + + RETURN_TYPES = ("SAM_MODEL", ) + FUNCTION = "load_model" + + CATEGORY = "ImpactPack" + + def load_model(self, model_name, device_mode="auto"): + if model_name == 'ESAM': + if 'ESAM_ModelLoader_Zho' not in nodes.NODE_CLASS_MAPPINGS: + try_install_custom_node('https://github.com/ZHO-ZHO-ZHO/ComfyUI-YoloWorld-EfficientSAM', + "To use 'ESAM' model, 'ComfyUI-YoloWorld-EfficientSAM' extension is required.") + raise Exception("'ComfyUI-YoloWorld-EfficientSAM' node isn't installed.") + + esam_loader = nodes.NODE_CLASS_MAPPINGS['ESAM_ModelLoader_Zho']() + + if device_mode == 'CPU': + esam = esam_loader.load_esam_model('CPU')[0] + else: + device_mode = 'CUDA' + esam = esam_loader.load_esam_model('CUDA')[0] + + sam_obj = core.ESAMWrapper(esam, device_mode) + esam.sam_wrapper = sam_obj + + print(f"Loads EfficientSAM model: (device:{device_mode})") + return (esam, ) + + modelname = folder_paths.get_full_path("sams", model_name) + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam = sam_model_registry[model_kind](checkpoint=modelname) + size = os.path.getsize(modelname) + safe_to = core.SafeToGPU(size) + + # Unless user explicitly wants to use CPU, we use GPU + device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU" + + if device_mode == "Prefer GPU": + safe_to.to_device(sam, device) + + is_auto_mode = device_mode == "AUTO" + + sam_obj = core.SAMWrapper(sam, is_auto_mode=is_auto_mode, safe_to_gpu=safe_to) + sam.sam_wrapper = sam_obj + + print(f"Loads SAM model: {modelname} (device:{device_mode})") + return (sam, ) + + +class ONNXDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "onnx_detector": ("ONNX_DETECTOR",), + "image": ("IMAGE",), + "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + OUTPUT_NODE = True + + def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size): + segs = onnx_detector.detect(image, threshold, dilation, crop_factor, drop_size) + return (segs, ) + + +class DetailerForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": 
(core.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + image = image.clone() + enhanced_alpha_list = [] + enhanced_list = [] + cropped_list = [] + cnet_pil_list = [] + + segs = core.segs_scale_match(segs, image.shape) + new_segs = [] + + wildcard_concat_mode = None + if wildcard_opt is not None: + if wildcard_opt.startswith('[CONCAT]'): + wildcard_concat_mode = 'concat' + wildcard_opt = wildcard_opt[8:] + wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt) + else: + wmode, wildcard_chooser = None, None + + if wmode in ['ASC', 'DSC']: + if wmode == 'ASC': + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1])) + else: + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True) + else: + ordered_segs = segs[1] + + for i, seg in enumerate(ordered_segs): + cropped_image = crop_ndarray4(image.cpu().numpy(), seg.crop_region) # Never use seg.cropped_image to handle overlapping area + cropped_image = to_tensor(cropped_image) + mask = to_tensor(seg.cropped_mask) + mask = tensor_gaussian_blur_mask(mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + if wildcard_chooser is not None and wmode != "LAB": + seg_seed, wildcard_item = wildcard_chooser.get(seg) + elif wildcard_chooser is not None and wmode == "LAB": + seg_seed, wildcard_item = None, wildcard_chooser.get(seg) + else: + seg_seed, wildcard_item = None, None + + seg_seed = seed + i if seg_seed is None else seg_seed + + cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + + if not isinstance(negative, str): + cropped_negative = [ + [condition, { + k: 
core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + else: + # Negative Conditioning is placeholder such as FLUX.1 + cropped_negative = negative + + enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, + seg.bbox, seg_seed, steps, cfg, sampler_name, scheduler, + cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint, + wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode, + detailer_hook=detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, + scheduler_func=scheduler_func_opt) + + if cnet_pils is not None: + cnet_pil_list.extend(cnet_pils) + + if not (enhanced_image is None): + # don't latent composite-> converting to latent caused poor quality + # use image paste + image = image.cpu() + enhanced_image = enhanced_image.cpu() + tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask) + enhanced_list.append(enhanced_image) + + if detailer_hook is not None: + image = detailer_hook.post_paste(image) + + if not (enhanced_image is None): + # Convert enhanced_pil_alpha to RGBA mode + enhanced_image_alpha = tensor_convert_rgba(enhanced_image) + new_seg_image = enhanced_image.numpy() # alpha should not be applied to seg_image + + # Apply the mask + mask = tensor_resize(mask, *tensor_get_size(enhanced_image)) + tensor_putalpha(enhanced_image_alpha, mask) + enhanced_alpha_list.append(enhanced_image_alpha) + else: + new_seg_image = None + + cropped_list.append(cropped_image) + + new_seg = SEG(new_seg_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + image_tensor = tensor_convert_rgb(image) + + cropped_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True) + + return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs) + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1, + detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + enhanced_img, *_ = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + return (enhanced_img, ) + + +class DetailerForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, 
"max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE") + RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + # set fallback image + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, new_segs, basic_pipe, cnet_pil_list + + +class FaceDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", 
{"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + }} + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + @staticmethod + def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, + bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1, + inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + # make default prompt as 'face' if empty prompt for CLIPSeg + bbox_detector.setAux('face') + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size, detailer_hook=detailer_hook) + bbox_detector.setAux(None) + + # bbox + sam combination + if sam_model_opt is not None: + sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation, + 
sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, ) + segs = core.segs_bitwise_and_mask(segs, sam_mask) + + elif segm_detector is not None: + segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size) + + if (hasattr(segm_detector, 'override_bbox_by_segm') and segm_detector.override_bbox_by_segm and + not (detailer_hook is not None and not hasattr(detailer_hook, 'override_bbox_by_segm'))): + segs = segm_segs + else: + segm_mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, segm_mask) + + if len(segs[1]) > 0: + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard_opt, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + else: + enhanced_img = image + cropped_enhanced = [] + cropped_enhanced_alpha = [] + cnet_pil_list = [] + + # Mask Generator + mask = core.segs_to_combined_mask(segs) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list + + def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1, + sam_model_opt=None, segm_detector_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + result_img = None + result_mask = None + result_cropped_enhanced = [] + result_cropped_enhanced_alpha = [] + result_cnet_images = [] + + if len(image) > 1: + print(f"[Impact Pack] WARN: FaceDetailer is not a node designed for video detailing. 
If you intend to perform video detailing, please use Detailer For AnimateDiff.") + + for i, single_image in enumerate(image): + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img + result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask + result_cropped_enhanced.extend(cropped_enhanced) + result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha) + result_cnet_images.extend(cnet_pil_list) + + pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None) + return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, pipe, result_cnet_images + + +class LatentPixelScale: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "scale_method": (s.upscale_methods,), + "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.1}), + "vae": ("VAE", ), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + } + } + + RETURN_TYPES = ("LATENT", "IMAGE") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None): + if upscale_model_opt is None: + latimg = core.latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae) + else: + latimg = core.latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae) + return latimg + + +class NoiseInjectionDetailerHookProvider: + schedules = ["skip_start", "from_start"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_cycle": (s.schedules,), + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "start_strength": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 200.0, "step": 0.01}), + "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, schedule_for_cycle, source, seed, start_strength, end_strength): + try: + hook = hooks.InjectNoiseHookForDetailer(source, seed, start_strength, end_strength, + from_start=('from_start' in schedule_for_cycle)) + return (hook, ) + except Exception as e: + print("[ERROR] NoiseInjectionDetailerHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +# class CustomNoiseDetailerHookProvider: +# @classmethod +# def INPUT_TYPES(s): +# return {"required": { +# "noise": ("NOISE",)}, +# } +# +# RETURN_TYPES = ("DETAILER_HOOK",) +# FUNCTION = "doit" +# +# CATEGORY = "ImpactPack/Detailer" +# +# def doit(self, noise): +# hook = hooks.CustomNoiseDetailerHookProvider(noise) +# return (hook, ) + + +class VariationNoiseDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01})} + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, seed, strength): + hook = hooks.VariationNoiseDetailerHookProvider(seed, strength) + return (hook, ) + + +class UnsamplerDetailerHookProvider: + schedules = ["skip_start", "from_start"] + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 25, "min": 1, "max": 10000}), + "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}), + "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "normalize": (["disable", "enable"], ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "schedule_for_cycle": (s.schedules,), + }} + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, schedule_for_cycle): + try: + hook = hooks.UnsamplerDetailerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, + from_start=('from_start' in schedule_for_cycle)) + + return (hook, ) + except Exception as e: + print("[ERROR] UnsamplerDetailerHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +class DenoiseSchedulerDetailerHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_cycle": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, schedule_for_cycle, target_denoise): + hook = hooks.SimpleDetailerDenoiseSchedulerHook(target_denoise) + return (hook, ) + + +class CoreMLDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"mode": (["512x512", "768x768", "512x768", "768x512"], )}, } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, mode): + hook = hooks.CoreMLHook(mode) + return (hook, ) + + +class CfgScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_cfg): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleCfgScheduleHook(target_cfg) + + return (hook, ) + + +class UnsamplerHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 25, "min": 1, "max": 10000}), + "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}), + "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "normalize": (["disable", "enable"], ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "schedule_for_iteration": (s.schedules,), + }} + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, schedule_for_iteration): + try: + hook = None + if schedule_for_iteration == "simple": + hook = hooks.UnsamplerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative) + + return (hook, ) + except Exception as e: + print("[ERROR] UnsamplerHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +class NoiseInjectionHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "start_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, source, seed, start_strength, end_strength): + try: + hook = None + if schedule_for_iteration == "simple": + hook = hooks.InjectNoiseHook(source, seed, start_strength, end_strength) + + return (hook, ) + except Exception as e: + print("[ERROR] NoiseInjectionHookProvider: 'ComfyUI Noise' custom node isn't installed. You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +class DenoiseScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_denoise): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleDenoiseScheduleHook(target_denoise) + + return (hook, ) + + +class StepsScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_steps): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleStepsScheduleHook(target_steps) + + return (hook, ) + + +class DetailerHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("DETAILER_HOOK",), + "hook2": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = hooks.DetailerHookCombine(hook1, hook2) + return (hook, ) + + +class PixelKSampleHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("PK_HOOK",), + "hook2": ("PK_HOOK",), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = hooks.PixelKSampleHookCombine(hook1, hook2) + return (hook, ) + + +class PixelTiledKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 
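# [Editor's note] The schedule-hook providers above only carry endpoint values
# (e.g. start_strength/end_strength, target_denoise); the interpolation itself
# lives in the hooks module. Purely as an assumption for illustration, a
# "simple" schedule could be a linear ramp across iterations:
def _example_linear_schedule(start, end, step, total_steps):
    if total_steps <= 1:
        return end
    t = step / (total_steps - 1)  # 0.0 on the first iteration, 1.0 on the last
    return start + (end - start) * t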
1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt=None, + pk_hook_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt, + tile_size=max(tile_width, tile_height), tile_cnet_strength=tile_cnet_strength) + return (upscaler, ) + else: + print("[ERROR] PixelTiledKSampleUpscalerProvider: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelTiledKSampleUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + "basic_pipe": ("BASIC_PIPE",) + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, + tile_cnet_opt=None, tile_cnet_strength=1.0): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + model, _, vae, positive, negative = basic_pipe + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt, + tile_size=max(tile_width, tile_height), tile_cnet_strength=tile_cnet_strength) + return (upscaler, ) + else: + print("[ERROR] PixelTiledKSampleUpscalerProviderPipe: ComfyUI_TiledKSampler custom node isn't installed. 
You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None, tile_size=512, scheduler_func_opt=None): + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size, scheduler_func=scheduler_func_opt) + return (upscaler, ) + + +class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider): + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit_pipe" + + CATEGORY = "ImpactPack/Upscale" + + def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, + use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, + tile_size=512, scheduler_func_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0): + model, _, vae, positive, negative = basic_pipe + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size, scheduler_func=scheduler_func_opt, + tile_cnet_opt=tile_cnet_opt, tile_cnet_strength=tile_cnet_strength) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProvider: 
+ upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "vae": ("VAE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + + mask = make_2d_mask(mask) + + _, _, vae, _, _ = basic_pipe + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class IterativeLatentUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "step_mode": (["simple", "geometric"], {"default": "simple"}) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = 
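# [Editor's note] As the input names above suggest, the TwoSamplersForMask
# upscalers combine a base sampler's result with a mask sampler's result
# restricted to the masked region. The actual logic lives in
# core.TwoSamplersForMaskUpscaler; a hypothetical sketch of such a blend
# (tensors assumed BHWC, mask HxW):
def _example_two_sampler_blend(base_result, mask_result, mask):
    m = mask[None, ..., None]  # HxW -> 1xHxWx1 so it broadcasts over batch and channels
    return base_result * (1.0 - m) + mask_result * m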
("LATENT", "VAE") + RETURN_NAMES = ("latent", "vae") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, step_mode="simple", unique_id=None): + w = samples['samples'].shape[3]*8 # image width + h = samples['samples'].shape[2]*8 # image height + + if temp_prefix == "": + temp_prefix = None + + if step_mode == "geometric": + upscale_factor_unit = pow(upscale_factor, 1.0/steps) + else: # simple + upscale_factor_unit = max(0, (upscale_factor - 1.0) / steps) + + current_latent = samples + scale = 1 + + for i in range(steps-1): + if step_mode == "geometric": + scale *= upscale_factor_unit + else: # simple + scale += upscale_factor_unit + + new_w = w*scale + new_h = h*scale + core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps) + print(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ") + step_info = i, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + if scale < upscale_factor: + new_w = w*upscale_factor + new_h = h*upscale_factor + core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0) + print(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ") + step_info = steps-1, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + core.update_node_status(unique_id, "", None) + + return (current_latent, upscaler.vae) + + +class IterativeImageUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pixels": ("IMAGE", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "vae": ("VAE",), + "step_mode": (["simple", "geometric"], {"default": "simple"}) + }, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, step_mode="simple", unique_id=None): + if temp_prefix == "": + temp_prefix = None + + core.update_node_status(unique_id, "VAEEncode (first)", 0) + if upscaler.is_tiled: + latent = nodes.VAEEncodeTiled().encode(vae, pixels, upscaler.tile_size)[0] + else: + latent = nodes.VAEEncode().encode(vae, pixels)[0] + + refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, step_mode, unique_id) + + core.update_node_status(unique_id, "VAEDecode (final)", 1.0) + if upscaler.is_tiled: + pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0], upscaler.tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0] + + core.update_node_status(unique_id, "", None) + + return (pixels, ) + + +class FaceDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "detailer_pipe": ("DETAILER_PIPE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": 
("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, + sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + result_img = None + result_mask = None + result_cropped_enhanced = [] + result_cropped_enhanced_alpha = [] + result_cnet_images = [] + + if len(image) > 1: + print(f"[Impact Pack] WARN: FaceDetailer is not a node designed for video detailing. 
If you intend to perform video detailing, please use Detailer For AnimateDiff.") + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt, detailer_hook, \ + refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + for i, single_image in enumerate(image): + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img + result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask + result_cropped_enhanced.extend(cropped_enhanced) + result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha) + result_cnet_images.extend(cnet_pil_list) + + if len(result_cropped_enhanced) == 0: + result_cropped_enhanced = [empty_pil_tensor()] + + if len(result_cropped_enhanced_alpha) == 0: + result_cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(result_cnet_images) == 0: + result_cnet_images = [empty_pil_tensor()] + + return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, detailer_pipe, result_cnet_images + + +class MaskDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "mask bbox", "label_off": "crop region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}), + + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE", ), + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 
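# [Editor's note] DETAILER_PIPE, as unpacked above, is a positional 14-tuple.
# Keeping the field order in one place avoids off-by-one unpacking bugs; an
# illustrative named unpacking (not part of Impact Pack):
def _example_unpack_detailer_pipe(detailer_pipe):
    (model, clip, vae, positive, negative, wildcard,
     bbox_detector, segm_detector, sam_model_opt, detailer_hook,
     refiner_model, refiner_clip, refiner_positive, refiner_negative) = detailer_pipe
    return {"model": model, "clip": clip, "vae": vae,
            "positive": positive, "negative": negative, "wildcard": wildcard,
            "bbox_detector": bbox_detector, "segm_detector": segm_detector,
            "sam_model_opt": sam_model_opt, "detailer_hook": detailer_hook,
            "refiner_model": refiner_model, "refiner_clip": refiner_clip,
            "refiner_positive": refiner_positive, "refiner_negative": refiner_negative}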
20, "min": 0, "max": 100, "step": 1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "contour_fill": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt") + OUTPUT_IS_LIST = (False, True, True, False, False) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode, + seed, steps, cfg, sampler_name, scheduler, denoise, + feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1, + refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, + bbox_fill=False, contour_fill=True, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: MaskDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + # create segs + if mask is not None: + mask = make_2d_mask(mask) + segs = core.mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + else: + segs = ((image.shape[1], image.shape[2]), []) + + enhanced_img_batch = None + cropped_enhanced_list = [] + cropped_enhanced_alpha_list = [] + + for i in range(batch_size): + if mask is not None: + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, _, _ = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed+i, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, mask_mode, + force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, + refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + else: + enhanced_img, cropped_enhanced, cropped_enhanced_alpha = image, [], [] + + if enhanced_img_batch is None: + enhanced_img_batch = enhanced_img + else: + enhanced_img_batch = torch.cat((enhanced_img_batch, enhanced_img), dim=0) + + cropped_enhanced_list += cropped_enhanced + cropped_enhanced_alpha_list += cropped_enhanced_alpha + + # set fallback image + if len(cropped_enhanced_list) == 0: + cropped_enhanced_list = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha_list) == 0: + cropped_enhanced_alpha_list = [empty_pil_tensor()] + + return enhanced_img_batch, cropped_enhanced_list, cropped_enhanced_alpha_list, basic_pipe, refiner_basic_pipe_opt + + +class DetailerForEachTest(DetailerForEach): + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") + RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, model, clip, vae, guide_size, 
guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list + + +class DetailerForEachTestPipe(DetailerForEachPipe): + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", ) + RETURN_NAMES = ("image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", 'cnet_images') + OUTPUT_IS_LIST = (False, False, False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list 
= [empty_pil_tensor()]
+
+        return enhanced_img, new_segs, basic_pipe, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list
+
+
+class SegsBitwiseAndMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "segs": ("SEGS",),
+                        "mask": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, segs, mask):
+        return (core.segs_bitwise_and_mask(segs, mask), )
+
+
+class SegsBitwiseAndMaskForEach:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "segs": ("SEGS",),
+                        "masks": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, segs, masks):
+        return (core.apply_mask_to_each_seg(segs, masks), )
+
+
+class BitwiseAndMaskForEach:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {
+                        "base_segs": ("SEGS",),
+                        "mask_segs": ("SEGS",),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, base_segs, mask_segs):
+        mask = core.segs_to_combined_mask(mask_segs)
+        mask = make_3d_mask(mask)
+
+        return SegsBitwiseAndMask().doit(base_segs, mask)
+
+
+class SubtractMaskForEach:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "base_segs": ("SEGS",),
+                        "mask_segs": ("SEGS",),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, base_segs, mask_segs):
+        mask = core.segs_to_combined_mask(mask_segs)
+        mask = make_3d_mask(mask)
+        return (core.segs_bitwise_subtract_mask(base_segs, mask), )
+
+
+class ToBinaryMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "mask": ("MASK",),
+                        "threshold": ("INT", {"default": 20, "min": 1, "max": 255}),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask, threshold):
+        mask = to_binary_mask(mask, threshold/255.0)
+        return (mask,)
+
+
+class BitwiseAndMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "mask1": ("MASK",),
+                        "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = bitwise_and_masks(mask1, mask2)
+        return (mask,)
+
+
+class SubtractMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "mask1": ("MASK", ),
+                        "mask2": ("MASK", ),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = subtract_masks(mask1, mask2)
+        return (mask,)
+
+
+class AddMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "mask1": ("MASK",),
+                        "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = add_masks(mask1, mask2)
+        return (mask,)
+
+
+import nodes
+
+
+def get_image_hash(arr):
+    split_index1 = arr.shape[0] // 2
+    split_index2 = arr.shape[1] // 2
+    part1 = arr[:split_index1, :split_index2]
+    part2 = arr[:split_index1, split_index2:]
+    part3 = arr[split_index1:, :split_index2]
+    part4 = arr[split_index1:, split_index2:]
+
+    # sum each quadrant
+    sum1 = np.sum(part1)
+    sum2 = np.sum(part2)
+    sum3 = np.sum(part3)
+    sum4 = np.sum(part4)
+
+    return hash((sum1, sum2, sum3, sum4))
+
+
+def get_file_item(base_type, path):
+    path_type = base_type
+
+    # paths may arrive annotated, e.g. "subdir/name.png [output]"
+    if path.endswith("[output]"):
+        path_type = "output"
+        path = path[:-9]
+    elif path.endswith("[input]"):
+        path_type = "input"
+        path = path[:-8]
+ elif path == "[temp]": + path_type = "temp" + path = path[:-7] + + subfolder = os.path.dirname(path) + filename = os.path.basename(path) + + return { + "filename": filename, + "subfolder": subfolder, + "type": path_type + } + + +class ImageReceiver: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "save_to_workflow": ("BOOLEAN", {"default": False}), + "image_data": ("STRING", {"multiline": False}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + RETURN_TYPES = ("IMAGE", "MASK") + + CATEGORY = "ImpactPack/Util" + + def doit(self, image, link_id, save_to_workflow, image_data, trigger_always): + if save_to_workflow: + try: + image_data = base64.b64decode(image_data.split(",")[1]) + i = Image.open(BytesIO(image_data)) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return (image, mask.unsqueeze(0)) + except Exception as e: + print(f"[WARN] ComfyUI-Impact-Pack: ImageReceiver - invalid 'image_data'") + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return (empty_pil_tensor(64, 64), mask, ) + else: + return nodes.LoadImage().load_image(image) + + @classmethod + def VALIDATE_INPUTS(s, image, link_id, save_to_workflow, image_data, trigger_always): + if image != '#DATA' and not folder_paths.exists_annotated_filepath(image) or image.startswith("/") or ".." 
in image: + return "Invalid image file: {}".format(image) + + return True + + @classmethod + def IS_CHANGED(s, image, link_id, save_to_workflow, image_data, trigger_always): + if trigger_always: + return float("NaN") + else: + if save_to_workflow: + return hash(image_data) + else: + return hash(image) + + +from server import PromptServer + +class ImageSender(nodes.PreviewImage): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ImgSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None): + result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo) + PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']}) + return result + + +class LatentReceiver: + def __init__(self): + self.input_dir = folder_paths.get_input_directory() + self.type = "input" + + @classmethod + def INPUT_TYPES(s): + def check_file_extension(x): + return x.endswith(".latent") or x.endswith(".latent.png") + + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)] + return {"required": { + "latent": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT",) + + @staticmethod + def load_preview_latent(image_path): + if not os.path.exists(image_path): + return None + + image = Image.open(image_path) + exif_data = piexif.load(image.info["exif"]) + + if piexif.ExifIFD.UserComment in exif_data["Exif"]: + compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment] + compressed_data_io = BytesIO(compressed_data) + with zipfile.ZipFile(compressed_data_io, mode='r') as archive: + tensor_bytes = archive.read("latent") + tensor = safetensors.torch.load(tensor_bytes) + return {"samples": tensor['latent_tensor']} + return None + + def parse_filename(self, filename): + pattern = r"^(.*)/(.*?)\[(.*)\]\s*$" + match = re.match(pattern, filename) + if match: + subfolder = match.group(1) + filename = match.group(2).rstrip() + file_type = match.group(3) + else: + subfolder = '' + file_type = self.type + + return {'filename': filename, 'subfolder': subfolder, 'type': file_type} + + def doit(self, **kwargs): + if 'latent' not in kwargs: + return (torch.zeros([1, 4, 8, 8]), ) + + latent = kwargs['latent'] + + latent_name = latent + latent_path = folder_paths.get_annotated_filepath(latent_name) + + if latent.endswith(".latent"): + latent = safetensors.torch.load_file(latent_path, device="cpu") + multiplier = 1.0 + if "latent_format_version_0" not in latent: + multiplier = 1.0 / 0.18215 + samples = {"samples": latent["latent_tensor"].float() * multiplier} + else: + samples = LatentReceiver.load_preview_latent(latent_path) + + if samples is None: + samples = {'samples': torch.zeros([1, 4, 8, 8])} + + preview = self.parse_filename(latent_name) + + return { + 'ui': {"images": [preview]}, + 'result': (samples, ) + } + + @classmethod + def IS_CHANGED(s, latent, link_id, 
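# [Editor's note] LatentReceiver above rescales legacy .latent files: tensors
# saved before the "latent_format_version_0" marker store SD latents divided by
# the SD1.x scale factor 0.18215, so loading multiplies by 1/0.18215. The load
# path in isolation (illustrative helper mirroring the logic above):
def _example_load_latent_file(latent_path):
    import safetensors.torch
    latent = safetensors.torch.load_file(latent_path, device="cpu")
    multiplier = 1.0 if "latent_format_version_0" in latent else 1.0 / 0.18215
    return {"samples": latent["latent_tensor"].float() * multiplier}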
trigger_always): + if trigger_always: + return float("NaN") + else: + image_path = folder_paths.get_annotated_filepath(latent) + m = hashlib.sha256() + with open(image_path, 'rb') as f: + m.update(f.read()) + return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, latent, link_id, trigger_always): + if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent: + return "Invalid latent file: {}".format(latent) + return True + + +class LatentSender(nodes.SaveLatent): + def __init__(self): + super().__init__() + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "filename_prefix": ("STRING", {"default": "latents/LatentSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "preview_method": (["Latent2RGB-SDXL", "Latent2RGB-SD15", "TAESDXL", "TAESD15"],) + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + RETURN_TYPES = () + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path): + compressed_data = BytesIO() + with zipfile.ZipFile(compressed_data, mode='w') as archive: + archive.writestr("latent", tensor_bytes) + image = image.copy() + exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}} + + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + exif_bytes = piexif.dump(exif_data) + image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True) + + @staticmethod + def prepare_preview(latent_tensor, preview_method): + from comfy.cli_args import LatentPreviewMethod + import comfy.latent_formats as latent_formats + + lower_bound = 128 + upper_bound = 256 + + if preview_method == "Latent2RGB-SD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "TAESD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.TAESD + elif preview_method == "TAESDXL": + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.TAESD + else: # preview_method == "Latent2RGB-SDXL" + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.Latent2RGB + + previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) + + image = previewer.decode_latent_to_preview(latent_tensor) + min_size = min(image.size[0], image.size[1]) + max_size = max(image.size[0], image.size[1]) + + scale_factor = 1 + if max_size > upper_bound: + scale_factor = upper_bound/max_size + + # prevent too small preview + if min_size*scale_factor < lower_bound: + scale_factor = lower_bound/min_size + + w = int(image.size[0] * scale_factor) + h = int(image.size[1] * scale_factor) + + image = image.resize((w, h), resample=Image.NEAREST) + + return LatentSender.attach_format_text(image) + + @staticmethod + def attach_format_text(image): + width_a, height_a = image.size + + letter_image = Image.open(latent_letter_path) + width_b, height_b = letter_image.size + + new_width = max(width_a, width_b) + new_height = height_a + height_b + + new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0)) + + offset_x = (new_width - width_b) // 2 + offset_y = (height_a + (new_height - height_a - 
height_b) // 2) + new_image.paste(letter_image, (offset_x, offset_y)) + + new_image.paste(image, (0, 0)) + + return new_image + + def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, preview_method="Latent2RGB-SDXL", prompt=None, extra_pnginfo=None): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + # load preview + preview = LatentSender.prepare_preview(samples['samples'], preview_method) + + # support save metadata for latent sharing + file = f"{filename}_{counter:05}_.latent.png" + fullpath = os.path.join(full_output_folder, file) + + output = {"latent_tensor": samples["samples"]} + + tensor_bytes = safetensors.torch.save(output) + LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath) + + latent_path = { + 'filename': file, + 'subfolder': subfolder, + 'type': self.type + } + + PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]}) + + return {'ui': {'images': [latent_path]}} + + +class ImpactWildcardProcessor: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + } + + CATEGORY = "ImpactPack/Prompt" + + RETURN_TYPES = ("STRING", ) + FUNCTION = "doit" + + @staticmethod + def process(**kwargs): + return impact.wildcards.process(**kwargs) + + def doit(self, *args, **kwargs): + populated_text = ImpactWildcardProcessor.process(text=kwargs['populated_text'], seed=kwargs['seed']) + return (populated_text, ) + + +class ImpactWildcardEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ), + "Select to add Wildcard": (["Select the Wildcard to add to the text"], ), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + CATEGORY = "ImpactPack/Prompt" + + RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING") + RETURN_NAMES = ("model", "clip", "conditioning", "populated_text") + FUNCTION = "doit" + + @staticmethod + def process_with_loras(**kwargs): + return impact.wildcards.process_with_loras(**kwargs) + + @staticmethod + def get_wildcard_list(): + return impact.wildcards.get_wildcard_list() + + def doit(self, *args, **kwargs): + populated = kwargs['populated_text'] + processed = [] + model, clip, conditioning = impact.wildcards.process_with_loras(wildcard_opt=populated, model=kwargs['model'], clip=kwargs['clip'], seed=kwargs['seed'], processed=processed) + return model, clip, conditioning, processed[0] + + +class ImpactSchedulerAdapter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"defaultInput": True, }), + "extra_scheduler": (['None', 'AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]'],), + }} + + CATEGORY = 
"ImpactPack/Util" + + RETURN_TYPES = (core.SCHEDULERS,) + RETURN_NAMES = ("scheduler",) + + FUNCTION = "doit" + + def doit(self, scheduler, extra_scheduler): + if extra_scheduler != 'None': + return (extra_scheduler,) + + return (scheduler,) + diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..be1afa5a4061bef6a53894ae913a00253d3d06e2 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py @@ -0,0 +1,351 @@ +import nodes +from comfy.k_diffusion import sampling as k_diffusion_sampling +from comfy import samplers +from comfy_extras import nodes_custom_sampler +import latent_preview +import comfy +import torch +import math +import comfy.model_management as mm + + +try: + from comfy_extras.nodes_custom_sampler import Noise_EmptyNoise, Noise_RandomNoise + import node_helpers +except: + print(f"\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n") + raise Exception("[Impact Pack] ComfyUI is an outdated version.") + + +def calculate_sigmas(model, sampler, scheduler, steps): + discard_penultimate_sigma = False + if sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']: + steps += 1 + discard_penultimate_sigma = True + + if scheduler.startswith('AYS'): + sigmas = nodes.NODE_CLASS_MAPPINGS['AlignYourStepsScheduler']().get_sigmas(scheduler[4:], steps, denoise=1.0)[0] + elif scheduler.startswith('GITS[coeff='): + sigmas = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']().get_sigmas(float(scheduler[11:-1]), steps, denoise=1.0)[0] + else: + sigmas = samplers.calculate_sigmas(model.get_model_object("model_sampling"), scheduler, steps) + + if discard_penultimate_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + return sigmas + + +def get_noise_sampler(x, cpu, total_sigmas, **kwargs): + if 'extra_args' in kwargs and 'seed' in kwargs['extra_args']: + sigma_min, sigma_max = total_sigmas[total_sigmas > 0].min(), total_sigmas.max() + seed = kwargs['extra_args'].get("seed", None) + return k_diffusion_sampling.BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=cpu) + return None + + +def ksampler(sampler_name, total_sigmas, extra_options={}, inpaint_options={}): + if sampler_name == "dpmpp_sde": + def sample_dpmpp_sde(model, x, sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, True, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_sde(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + elif sampler_name == "dpmpp_sde_gpu": + def sample_dpmpp_sde(model, x, sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, False, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_sde_gpu(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + elif sampler_name == "dpmpp_2m_sde": + def sample_dpmpp_sde(model, x, sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, True, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_2m_sde(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + elif sampler_name == "dpmpp_2m_sde_gpu": + def sample_dpmpp_sde(model, x, 
sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, False, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_2m_sde_gpu(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + elif sampler_name == "dpmpp_3m_sde": + def sample_dpmpp_sde(model, x, sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, True, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_3m_sde(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + elif sampler_name == "dpmpp_3m_sde_gpu": + def sample_dpmpp_sde(model, x, sigmas, **kwargs): + noise_sampler = get_noise_sampler(x, False, total_sigmas, **kwargs) + if noise_sampler is not None: + kwargs['noise_sampler'] = noise_sampler + + return k_diffusion_sampling.sample_dpmpp_3m_sde_gpu(model, x, sigmas, **kwargs) + + sampler_function = sample_dpmpp_sde + + else: + return comfy.samplers.sampler_object(sampler_name) + + return samplers.KSAMPLER(sampler_function, extra_options, inpaint_options) + + +# modified version of SamplerCustom.sample +def sample_with_custom_noise(model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image, noise=None, callback=None): + latent = latent_image + latent_image = latent["samples"] + + if hasattr(comfy.sample, 'fix_empty_latent_channels'): + latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image) + + out = latent.copy() + out['samples'] = latent_image + + if noise is None: + if not add_noise: + noise = Noise_EmptyNoise().generate_noise(out) + else: + noise = Noise_RandomNoise(noise_seed).generate_noise(out) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + x0_output = {} + preview_callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output) + + if callback is not None: + def touched_callback(step, x0, x, total_steps): + callback(step, x0, x, total_steps) + preview_callback(step, x0, x, total_steps) + else: + touched_callback = preview_callback + + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED + + if negative != 'NegativePlaceholder': + guider = comfy.samplers.CFGGuider(model) + guider.set_conds(positive, negative) + guider.set_cfg(cfg) + else: + guider = nodes_custom_sampler.Guider_Basic(model) + positive = node_helpers.conditioning_set_values(positive, {"guidance": cfg}) + guider.set_conds(positive) + + device = mm.get_torch_device() + + noise = noise.to(device) + latent_image = latent_image.to(device) + if noise_mask is not None: + noise_mask = noise_mask.to(device) + + samples = guider.sample(noise, latent_image, sampler, sigmas, denoise_mask=noise_mask, callback=touched_callback, disable_pbar=disable_pbar, seed=noise_seed) + samples = samples.to(comfy.model_management.intermediate_device()) + + out["samples"] = samples + if "x0" in x0_output: + out_denoised = latent.copy() + out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu()) + else: + out_denoised = out + return out, out_denoised + + +# When sampling one step at a time, it mitigates the problem. 
(especially for _sde series samplers) +def separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, positive, negative, + latent_image, start_at_step, end_at_step, return_with_leftover_noise, sigma_ratio=1.0, sampler_opt=None, noise=None, callback=None, scheduler_func=None): + + if scheduler_func is not None: + total_sigmas = scheduler_func(model, sampler_name, steps) + else: + if sampler_opt is None: + total_sigmas = calculate_sigmas(model, sampler_name, scheduler, steps) + else: + total_sigmas = calculate_sigmas(model, "", scheduler, steps) + + sigmas = total_sigmas + + if end_at_step is not None and end_at_step < (len(total_sigmas) - 1): + sigmas = total_sigmas[:end_at_step + 1] + if not return_with_leftover_noise: + sigmas[-1] = 0 + + if start_at_step is not None: + if start_at_step < (len(sigmas) - 1): + sigmas = sigmas[start_at_step:] * sigma_ratio + else: + if latent_image is not None: + return latent_image + else: + return {'samples': torch.zeros_like(noise)} + + if sampler_opt is None: + impact_sampler = ksampler(sampler_name, total_sigmas) + else: + impact_sampler = sampler_opt + + if len(sigmas) == 0 or (len(sigmas) == 1 and sigmas[0] == 0): + return latent_image + + res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) + + if return_with_leftover_noise: + return res[0] + else: + return res[1] + + +def impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, sigma_ratio=1.0, sampler_opt=None, noise=None, scheduler_func=None): + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + steps + return separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + start_at_step, end_at_step, False, scheduler_func=scheduler_func) + + +def ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, sigma_factor=1.0, noise=None, scheduler_func=None): + + if refiner_ratio is None or refiner_model is None or refiner_clip is None or refiner_positive is None or refiner_negative is None: + # Use separated_sample instead of KSampler for `AYS scheduler` + # refined_latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise * sigma_factor)[0] + + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + steps + + refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, False, + sigma_ratio=sigma_factor, noise=noise, scheduler_func=scheduler_func) + else: + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + math.floor(steps * (1.0 - refiner_ratio)) + + # print(f"pre: {start_at_step} .. 
{end_at_step} / {advanced_steps}") + temp_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, True, + sigma_ratio=sigma_factor, noise=noise, scheduler_func=scheduler_func) + + if 'noise_mask' in latent_image: + # noise_latent = \ + # impact_sampling.separated_sample(refiner_model, "enable", seed, advanced_steps, cfg, sampler_name, + # scheduler, refiner_positive, refiner_negative, latent_image, end_at_step, + # end_at_step, "enable") + + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + temp_latent = latent_compositor.composite(latent_image, temp_latent, 0, 0, False, latent_image['noise_mask'])[0] + + # print(f"post: {end_at_step} .. {advanced_steps + 1} / {advanced_steps}") + refined_latent = separated_sample(refiner_model, False, seed, advanced_steps, cfg, sampler_name, scheduler, + refiner_positive, refiner_negative, temp_latent, end_at_step, advanced_steps + 1, False, + sigma_ratio=sigma_factor, scheduler_func=scheduler_func) + + return refined_latent + + +class KSamplerAdvancedWrapper: + params = None + + def __init__(self, model, cfg, sampler_name, scheduler, positive, negative, sampler_opt=None, sigma_factor=1.0, scheduler_func=None): + self.params = model, cfg, sampler_name, scheduler, positive, negative, sigma_factor + self.sampler_opt = sampler_opt + self.scheduler_func = scheduler_func + + def clone_with_conditionings(self, positive, negative): + model, cfg, sampler_name, scheduler, _, _, _ = self.params + return KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative, self.sampler_opt) + + def sample_advanced(self, add_noise, seed, steps, latent_image, start_at_step, end_at_step, return_with_leftover_noise, hook=None, + recovery_mode="ratio additional", recovery_sampler="AUTO", recovery_sigma_ratio=1.0, noise=None): + + model, cfg, sampler_name, scheduler, positive, negative, sigma_factor = self.params + # steps, start_at_step, end_at_step = self.compensate_denoise(steps, start_at_step, end_at_step) + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent = hook.pre_ksample_advanced(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise) + + if recovery_mode != 'DISABLE' and sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']: + base_image = latent_image.copy() + if recovery_mode == "ratio between": + sigma_ratio = 1.0 - recovery_sigma_ratio + else: + sigma_ratio = 1.0 + else: + base_image = None + sigma_ratio = 1.0 + + try: + if sigma_ratio > 0: + latent_image = separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise, sigma_ratio=sigma_ratio * sigma_factor, + sampler_opt=self.sampler_opt, noise=noise, scheduler_func=self.scheduler_func) + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + print(f"\nWARN: sampling skipped - sigma_min and sigma_max are 0") + return latent_image + + if (recovery_sigma_ratio > 0 and recovery_mode != 'DISABLE' and + sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']): + compensate = 0 if sampler_name in ['uni_pc', 
'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu'] else 2 + if recovery_sampler == "AUTO": + recovery_sampler = 'dpm_fast' if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu'] else 'dpmpp_2m' + + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + + noise_mask = latent_image['noise_mask'] + + if len(noise_mask.shape) == 4: + noise_mask = noise_mask.squeeze(0).squeeze(0) + + latent_image = latent_compositor.composite(base_image, latent_image, 0, 0, False, noise_mask)[0] + + try: + latent_image = separated_sample(model, add_noise, seed, steps, cfg, recovery_sampler, scheduler, + positive, negative, latent_image, start_at_step-compensate, end_at_step, return_with_leftover_noise, + sigma_ratio=recovery_sigma_ratio * sigma_factor, sampler_opt=self.sampler_opt, scheduler_func=self.scheduler_func) + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + print(f"\nWARN: sampling skipped - sigma_min and sigma_max are 0") + + return latent_image + + +class KSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, scheduler_func=None): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.scheduler_func = scheduler_func + + def sample(self, latent_image, hook=None): + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise) + + return impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, scheduler_func=self.scheduler_func) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py new file mode 100644 index 0000000000000000000000000000000000000000..c5746c10ce6d5d7cc878a6e27e95f1fa8ad07c8e --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py @@ -0,0 +1,564 @@ +import os +import threading +import traceback + +from aiohttp import web + +import impact +import folder_paths + +import torchvision + +import impact.core as core +import impact.impact_pack as impact_pack +from impact.utils import to_tensor +from segment_anything import SamPredictor, sam_model_registry +import numpy as np +import nodes +from PIL import Image +import io +import impact.wildcards as wildcards +import comfy +from io import BytesIO +import random +from server import PromptServer + + +@PromptServer.instance.routes.post("/upload/temp") +async def upload_image(request): + upload_dir = folder_paths.get_temp_directory() + + if not os.path.exists(upload_dir): + os.makedirs(upload_dir) + + post = await request.post() + image = post.get("image") + + if image and image.file: + filename = image.filename + if not filename: + return web.Response(status=400) + + split = os.path.splitext(filename) + i = 1 + while os.path.exists(os.path.join(upload_dir, filename)): + filename = f"{split[0]} ({i}){split[1]}" + i += 1 + + filepath = os.path.join(upload_dir, filename) + + with open(filepath, "wb") as f: + f.write(image.file.read()) + + return web.json_response({"name": filename}) + else: + return web.Response(status=400) + + 
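+# Client-side sketch for the /upload/temp route above (illustrative only; it
+# assumes a local ComfyUI server on the default port 8188 -- adjust host/port
+# to your setup). The route expects a multipart "image" field and answers with
+# the stored, collision-renamed filename as JSON:
+#
+#   import requests
+#   with open("mask.png", "rb") as f:
+#       resp = requests.post("http://127.0.0.1:8188/upload/temp", files={"image": f})
+#   print(resp.json()["name"])   # e.g. "mask.png", or "mask (1).png" on a name collision
+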
+sam_predictor = None +default_sam_model_name = os.path.join(impact_pack.model_path, "sams", "sam_vit_b_01ec64.pth") + +sam_lock = threading.Condition() + +last_prepare_data = None + + +def async_prepare_sam(image_dir, model_name, filename): + with sam_lock: + global sam_predictor + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam_model = sam_model_registry[model_kind](checkpoint=model_name) + sam_predictor = SamPredictor(sam_model) + + image_path = os.path.join(image_dir, filename) + image = nodes.LoadImage().load_image(image_path)[0] + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + if impact.config.get_config()['sam_editor_cpu']: + device = 'cpu' + else: + device = comfy.model_management.get_torch_device() + + sam_predictor.model.to(device=device) + sam_predictor.set_image(image, "RGB") + sam_predictor.model.cpu() + + +@PromptServer.instance.routes.post("/sam/prepare") +async def sam_prepare(request): + global sam_predictor + global last_prepare_data + data = await request.json() + + with sam_lock: + if last_prepare_data is not None and last_prepare_data == data: + # already loaded: skip -- prevent redundant loading + return web.Response(status=200) + + last_prepare_data = data + + model_name = 'sam_vit_b_01ec64.pth' + if data['sam_model_name'] == 'auto': + model_name = impact.config.get_config()['sam_editor_model'] + + model_name = os.path.join(impact_pack.model_path, "sams", model_name) + + print(f"[INFO] ComfyUI-Impact-Pack: Loading SAM model '{impact_pack.model_path}'") + + filename, image_dir = folder_paths.annotated_filepath(data["filename"]) + + if image_dir is None: + typ = data['type'] if data['type'] != '' else 'output' + image_dir = folder_paths.get_directory_by_type(typ) + if data['subfolder'] is not None and data['subfolder'] != '': + image_dir += f"/{data['subfolder']}" + + if image_dir is None: + return web.Response(status=400) + + thread = threading.Thread(target=async_prepare_sam, args=(image_dir, model_name, filename,)) + thread.start() + + print(f"[INFO] ComfyUI-Impact-Pack: SAM model loaded. ") + return web.Response(status=200) + + +@PromptServer.instance.routes.post("/sam/release") +async def release_sam(request): + global sam_predictor + + with sam_lock: + del sam_predictor + sam_predictor = None + + print(f"[INFO] ComfyUI-Impact-Pack: unloading SAM model") + + +@PromptServer.instance.routes.post("/sam/detect") +async def sam_detect(request): + global sam_predictor + with sam_lock: + if sam_predictor is not None: + if impact.config.get_config()['sam_editor_cpu']: + device = 'cpu' + else: + device = comfy.model_management.get_torch_device() + + sam_predictor.model.to(device=device) + try: + data = await request.json() + + positive_points = data['positive_points'] + negative_points = data['negative_points'] + threshold = data['threshold'] + + points = [] + plabs = [] + + for p in positive_points: + points.append(p) + plabs.append(1) + + for p in negative_points: + points.append(p) + plabs.append(0) + + detected_masks = core.sam_predict(sam_predictor, points, plabs, None, threshold) + mask = core.combine_masks2(detected_masks) + + if mask is None: + return web.Response(status=400) + + image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + i = 255. 
* image.cpu().numpy() + + img = Image.fromarray(np.clip(i[0], 0, 255).astype(np.uint8)) + + img_buffer = io.BytesIO() + img.save(img_buffer, format='png') + + headers = {'Content-Type': 'image/png'} + finally: + sam_predictor.model.to(device="cpu") + + return web.Response(body=img_buffer.getvalue(), headers=headers) + + else: + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/wildcards/refresh") +async def wildcards_refresh(request): + impact.wildcards.wildcard_load() + return web.Response(status=200) + + +@PromptServer.instance.routes.get("/impact/wildcards/list") +async def wildcards_list(request): + data = {'data': impact.wildcards.get_wildcard_list()} + return web.json_response(data) + + +@PromptServer.instance.routes.post("/impact/wildcards") +async def populate_wildcards(request): + data = await request.json() + populated = wildcards.process(data['text'], data.get('seed', None)) + return web.json_response({"text": populated}) + + +segs_picker_map = {} + +@PromptServer.instance.routes.get("/impact/segs/picker/count") +async def segs_picker_count(request): + node_id = request.rel_url.query.get('id', '') + + if node_id in segs_picker_map: + res = len(segs_picker_map[node_id]) + return web.Response(status=200, text=str(res)) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/segs/picker/view") +async def segs_picker(request): + node_id = request.rel_url.query.get('id', '') + idx = int(request.rel_url.query.get('idx', '')) + + if node_id in segs_picker_map and idx < len(segs_picker_map[node_id]): + img = to_tensor(segs_picker_map[node_id][idx]).permute(0, 3, 1, 2).squeeze(0) + pil = torchvision.transforms.ToPILImage('RGB')(img) + + image_bytes = BytesIO() + pil.save(image_bytes, format="PNG") + image_bytes.seek(0) + return web.Response(status=200, body=image_bytes, content_type='image/png', headers={"Content-Disposition": f"filename={node_id}{idx}.png"}) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/view/validate") +async def view_validate(request): + if "filename" in request.rel_url.query: + filename = request.rel_url.query["filename"] + subfolder = request.rel_url.query["subfolder"] + filename, base_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' in filename: + return web.Response(status=400) + + if base_dir is None: + base_dir = folder_paths.get_input_directory() + + file = os.path.join(base_dir, subfolder, filename) + + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/validate/pb_id_image") +async def view_validate(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id not in core.preview_bridge_image_id_map: + return web.Response(status=400) + + file = core.preview_bridge_image_id_map[pb_id] + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/set/pb_id_image") +async def set_previewbridge_image(request): + try: + if "filename" in request.rel_url.query: + node_id = request.rel_url.query["node_id"] + filename = request.rel_url.query["filename"] + path_type = request.rel_url.query["type"] + subfolder = request.rel_url.query["subfolder"] + filename, output_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' 
in filename: + return web.Response(status=400) + + if output_dir is None: + if path_type == 'input': + output_dir = folder_paths.get_input_directory() + elif path_type == 'output': + output_dir = folder_paths.get_output_directory() + else: + output_dir = folder_paths.get_temp_directory() + + file = os.path.join(output_dir, subfolder, filename) + item = { + 'filename': filename, + 'type': path_type, + 'subfolder': subfolder, + } + pb_id = core.set_previewbridge_image(node_id, file, item) + + return web.Response(status=200, text=pb_id) + except Exception: + traceback.print_exc() + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/get/pb_id_image") +async def get_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + _, path_item = core.preview_bridge_image_id_map[pb_id] + return web.json_response(path_item) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/view/pb_id_image") +async def view_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + file = core.preview_bridge_image_id_map[pb_id] + + with Image.open(file) as img: + filename = os.path.basename(file) + return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""}) + + return web.Response(status=400) + + +def onprompt_for_switch(json_data): + inversed_switch_info = {} + onprompt_switch_info = {} + onprompt_cond_branch_info = {} + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'ImpactInversedSwitch': + select_input = v['inputs']['select'] + if isinstance(select_input, list) and len(select_input) == 2: + input_node = json_data['prompt'][select_input[0]] + if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']: + inversed_switch_info[k] = input_node['inputs']['value'] + else: + inversed_switch_info[k] = select_input + + elif cls in ['ImpactSwitch', 'LatentSwitch', 'SEGSSwitch', 'ImpactMakeImageList']: + if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'select' in v['inputs']: + select_input = v['inputs']['select'] + if isinstance(select_input, list) and len(select_input) == 2: + input_node = json_data['prompt'][select_input[0]] + if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']: + onprompt_switch_info[k] = input_node['inputs']['value'] + if input_node['class_type'] == 'ImpactSwitch' and 'inputs' in input_node and 'select' in input_node['inputs']: + if isinstance(input_node['inputs']['select'], int): + onprompt_switch_info[k] = input_node['inputs']['select'] + else: + print(f"\n##### ##### #####\n[WARN] {cls}: For the 'select' operation, only 'select_index' of the 'ImpactSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs.\n##### ##### #####\n") + else: + onprompt_switch_info[k] = select_input + + elif cls == 'ImpactConditionalBranchSelMode': + if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'cond' in v['inputs']: + cond_input = v['inputs']['cond'] + if isinstance(cond_input, list) and len(cond_input) == 2: + input_node = json_data['prompt'][cond_input[0]] + if (input_node['class_type'] == 'ImpactValueReceiver' and 'inputs' in input_node + and 'value' in input_node['inputs'] and 'typ' in input_node['inputs']): + 
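+                            # ImpactValueReceiver carries its payload as a STRING widget, so a
+                            # BOOLEAN-typed receiver must be folded back to a real bool here
+                            # ("true", case-insensitive -> True, anything else -> False) before
+                            # it can steer the conditional-branch selection below.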
if 'BOOLEAN' == input_node['inputs']['typ']: + try: + onprompt_cond_branch_info[k] = input_node['inputs']['value'].lower() == "true" + except: + pass + else: + onprompt_cond_branch_info[k] = cond_input + + for k, v in json_data['prompt'].items(): + disable_targets = set() + + for kk, vv in v['inputs'].items(): + if isinstance(vv, list) and len(vv) == 2: + if vv[0] in inversed_switch_info: + if vv[1] + 1 != inversed_switch_info[vv[0]]: + disable_targets.add(kk) + + if k in onprompt_switch_info: + selected_slot_name = f"input{onprompt_switch_info[k]}" + for kk, vv in v['inputs'].items(): + if kk != selected_slot_name and kk.startswith('input'): + disable_targets.add(kk) + + if k in onprompt_cond_branch_info: + selected_slot_name = "tt_value" if onprompt_cond_branch_info[k] else "ff_value" + for kk, vv in v['inputs'].items(): + if kk in ['tt_value', 'ff_value'] and kk != selected_slot_name: + disable_targets.add(kk) + + for kk in disable_targets: + del v['inputs'][kk] + +def onprompt_for_pickers(json_data): + detected_pickers = set() + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'ImpactSEGSPicker': + detected_pickers.add(k) + + # garbage collection + keys_to_remove = [key for key in segs_picker_map if key not in detected_pickers] + for key in keys_to_remove: + del segs_picker_map[key] + + +def gc_preview_bridge_cache(json_data): + prompt_keys = json_data['prompt'].keys() + + for key in list(core.preview_bridge_cache.keys()): + if key not in prompt_keys: + print(f"key deleted: {key}") + del core.preview_bridge_cache[key] + + +def workflow_imagereceiver_update(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v and v['class_type'] == 'ImageReceiver': + if v['inputs']['save_to_workflow']: + v['inputs']['image'] = "#DATA" + + +def regional_sampler_seed_update(json_data): + prompt = json_data['prompt'] + + for k, v in prompt.items(): + if 'class_type' in v and v['class_type'] == 'RegionalSampler': + seed_2nd_mode = v['inputs']['seed_2nd_mode'] + + new_seed = None + if seed_2nd_mode == 'increment': + new_seed = v['inputs']['seed_2nd']+1 + if new_seed > 1125899906842624: + new_seed = 0 + elif seed_2nd_mode == 'decrement': + new_seed = v['inputs']['seed_2nd']-1 + if new_seed < 0: + new_seed = 1125899906842624 + elif seed_2nd_mode == 'randomize': + new_seed = random.randint(0, 1125899906842624) + + if new_seed is not None: + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "seed_2nd", "type": "INT", "value": new_seed}) + + +def onprompt_populate_wildcards(json_data): + prompt = json_data['prompt'] + + updated_widget_values = {} + for k, v in prompt.items(): + if 'class_type' in v and (v['class_type'] == 'ImpactWildcardEncode' or v['class_type'] == 'ImpactWildcardProcessor'): + inputs = v['inputs'] + if inputs['mode'] and isinstance(inputs['populated_text'], str): + if isinstance(inputs['seed'], list): + try: + input_node = prompt[inputs['seed'][0]] + if input_node['class_type'] == 'ImpactInt': + input_seed = int(input_node['inputs']['value']) + if not isinstance(input_seed, int): + continue + if input_node['class_type'] == 'Seed (rgthree)': + input_seed = int(input_node['inputs']['seed']) + if not isinstance(input_seed, int): + continue + else: + print(f"[Impact Pack] Only `ImpactInt`, `Seed (rgthree)` and `Primitive` Node are allowed as the seed for '{v['class_type']}'. It will be ignored. 
") + continue + except: + continue + else: + input_seed = int(inputs['seed']) + + inputs['populated_text'] = wildcards.process(inputs['wildcard_text'], input_seed) + inputs['mode'] = False + + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "populated_text", "type": "STRING", "value": inputs['populated_text']}) + updated_widget_values[k] = inputs['populated_text'] + + if 'extra_data' in json_data and 'extra_pnginfo' in json_data['extra_data']: + for node in json_data['extra_data']['extra_pnginfo']['workflow']['nodes']: + key = str(node['id']) + if key in updated_widget_values: + node['widgets_values'][1] = updated_widget_values[key] + node['widgets_values'][2] = False + + +def onprompt_for_remote(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v: + cls = v['class_type'] + if cls == 'ImpactRemoteBoolean' or cls == 'ImpactRemoteInt': + inputs = v['inputs'] + node_id = str(inputs['node_id']) + + if node_id not in prompt: + continue + + target_inputs = prompt[node_id]['inputs'] + + widget_name = inputs['widget_name'] + if widget_name in target_inputs: + widget_type = None + if cls == 'ImpactRemoteBoolean' and isinstance(target_inputs[widget_name], bool): + widget_type = 'BOOLEAN' + + elif cls == 'ImpactRemoteInt' and (isinstance(target_inputs[widget_name], int) or isinstance(target_inputs[widget_name], float)): + widget_type = 'INT' + + if widget_type is None: + break + + target_inputs[widget_name] = inputs['value'] + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": node_id, "widget_name": widget_name, "type": widget_type, "value": inputs['value']}) + + +def onprompt(json_data): + try: + onprompt_for_remote(json_data) # NOTE: top priority + onprompt_for_switch(json_data) + onprompt_for_pickers(json_data) + onprompt_populate_wildcards(json_data) + gc_preview_bridge_cache(json_data) + workflow_imagereceiver_update(json_data) + regional_sampler_seed_update(json_data) + core.current_prompt = json_data + except Exception as e: + print(f"[WARN] ComfyUI-Impact-Pack: Error on prompt - several features will not work.\n{e}") + + return json_data + + +PromptServer.instance.add_on_prompt_handler(onprompt) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..8e09c42e08b196ec240182e2a88d1e807e859afb --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py @@ -0,0 +1,273 @@ +import folder_paths + +import impact.mmdet_nodes as mmdet_nodes +from impact.utils import * +from impact.core import SEG +import impact.core as core +import nodes + +class NO_BBOX_MODEL: + pass + + +class NO_SEGM_MODEL: + pass + + +class MMDetLoader: + @classmethod + def INPUT_TYPES(s): + bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("mmdets_bbox")] + segms = ["segm/"+x for x in folder_paths.get_filename_list("mmdets_segm")] + return {"required": {"model_name": (bboxs + segms, )}} + RETURN_TYPES = ("BBOX_MODEL", "SEGM_MODEL") + FUNCTION = "load_mmdet" + + CATEGORY = "ImpactPack/Legacy" + + def load_mmdet(self, model_name): + mmdet_path = folder_paths.get_full_path("mmdets", model_name) + model = mmdet_nodes.load_mmdet(mmdet_path) + + if model_name.startswith("bbox"): + return model, NO_SEGM_MODEL() + else: + return NO_BBOX_MODEL(), model + + +class BboxDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return 
{"required": { + "bbox_model": ("BBOX_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + @staticmethod + def detect(bbox_model, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + mmdet_results = mmdet_nodes.inference_bbox(bbox_model, image, threshold) + segmasks = core.create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + items.append(item) + + shape = h, w + return shape, items + + def doit(self, bbox_model, image, threshold, dilation, crop_factor): + return (BboxDetectorForEach.detect(bbox_model, image, threshold, dilation, crop_factor), ) + + +class SegmDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_model": ("SEGM_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + def doit(self, segm_model, image, threshold, dilation): + mmdet_results = mmdet_nodes.inference_segm(image, segm_model, threshold) + segmasks = core.create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + mask = combine_masks(segmasks) + return (mask,) + + +class BboxDetectorCombined(SegmDetectorCombined): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_model": ("BBOX_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 4, "min": 0, "max": 255, "step": 1}), + } + } + + def doit(self, bbox_model, image, threshold, dilation): + mmdet_results = mmdet_nodes.inference_bbox(bbox_model, image, threshold) + segmasks = core.create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + mask = combine_masks(segmasks) + return (mask,) + + +class SegmDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_model": ("SEGM_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + def doit(self, segm_model, image, threshold, dilation, crop_factor): + mmdet_results = mmdet_nodes.inference_segm(image, segm_model, threshold) + segmasks = core.create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = 
dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + items.append(item) + + shape = h,w + return ((shape, items), ) + + +class SegsMaskCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "image": ("IMAGE", ), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + @staticmethod + def combine(segs, image): + h = image.shape[1] + w = image.shape[2] + + mask = np.zeros((h, w), dtype=np.uint8) + + for seg in segs[1]: + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + + return torch.from_numpy(mask.astype(np.float32) / 255.0) + + def doit(self, segs, image): + return (SegsMaskCombine.combine(segs, image), ) + + +class MaskPainter(nodes.PreviewImage): + @classmethod + def INPUT_TYPES(s): + return {"required": {"images": ("IMAGE",), }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + }, + "optional": {"mask_image": ("IMAGE_PATH",), }, + "optional": {"image": (["#placeholder"], )}, + } + + RETURN_TYPES = ("MASK",) + + FUNCTION = "save_painted_images" + + CATEGORY = "ImpactPack/Legacy" + + def save_painted_images(self, images, filename_prefix="impact-mask", + prompt=None, extra_pnginfo=None, mask_image=None, image=None): + if image == "#placeholder" or image['image_hash'] != id(images): + # new input image + res = self.save_images(images, filename_prefix, prompt, extra_pnginfo) + + item = res['ui']['images'][0] + + if not item['filename'].endswith(']'): + filepath = f"{item['filename']} [{item['type']}]" + else: + filepath = item['filename'] + + _, mask = nodes.LoadImage().load_image(filepath) + + res['ui']['aux'] = [id(images), res['ui']['images']] + res['result'] = (mask, ) + + return res + + else: + # new mask + if '0' in image: # fallback + image = image['0'] + + forward = {'filename': image['forward_filename'], + 'subfolder': image['forward_subfolder'], + 'type': image['forward_type'], } + + res = {'ui': {'images': [forward]}} + + imgpath = "" + if 'subfolder' in image and image['subfolder'] != "": + imgpath = image['subfolder'] + "/" + + imgpath += f"{image['filename']}" + + if 'type' in image and image['type'] != "": + imgpath += f" [{image['type']}]" + + res['ui']['aux'] = [id(images), [forward]] + _, mask = nodes.LoadImage().load_image(imgpath) + res['result'] = (mask, ) + + return res diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py new file mode 100644 index 0000000000000000000000000000000000000000..49e2e84fdca84e999bcf7874019d63751a0afeac --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py @@ -0,0 +1,707 @@ +import sys +import time + +import execution +import impact.impact_server +from server import PromptServer +from impact.utils import any_typ +import impact.core as core +import re +import nodes +import traceback + + +class ImpactCompare: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cmp": (['a = b', 'a <> b', 'a > b', 'a < b', 'a 
>= b', 'a <= b', 'tt', 'ff'],), + "a": (any_typ, ), + "b": (any_typ, ), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, cmp, a, b): + if cmp == "a = b": + return (a == b, ) + elif cmp == "a <> b": + return (a != b, ) + elif cmp == "a > b": + return (a > b, ) + elif cmp == "a < b": + return (a < b, ) + elif cmp == "a >= b": + return (a >= b, ) + elif cmp == "a <= b": + return (a <= b, ) + elif cmp == 'tt': + return (True, ) + else: + return (False, ) + + +class ImpactNotEmptySEGS: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"segs": ("SEGS",)}} + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, segs): + return (segs[1] != [], ) + + +class ImpactConditionalBranch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cond": ("BOOLEAN",), + "tt_value": (any_typ,), + "ff_value": (any_typ,), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, cond, tt_value, ff_value): + if cond: + return (tt_value,) + else: + return (ff_value,) + + +class ImpactConditionalBranchSelMode: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cond": ("BOOLEAN",), + "sel_mode": ("BOOLEAN", {"default": True, "label_on": "select_on_prompt", "label_off": "select_on_execution"}), + }, + "optional": { + "tt_value": (any_typ,), + "ff_value": (any_typ,), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, cond, sel_mode, tt_value=None, ff_value=None): + print(f'tt={tt_value is None}\nff={ff_value is None}') + if cond: + return (tt_value,) + else: + return (ff_value,) + + +class ImpactConvertDataType: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": {"value": (any_typ,)}} + + RETURN_TYPES = ("STRING", "FLOAT", "INT", "BOOLEAN") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + @staticmethod + def is_number(string): + pattern = re.compile(r'^[-+]?[0-9]*\.?[0-9]+$') + return bool(pattern.match(string)) + + def doit(self, value): + if self.is_number(str(value)): + num = value + else: + if str.lower(str(value)) != "false": + num = 1 + else: + num = 0 + return (str(value), float(num), int(float(num)), bool(float(num)), ) + + +class ImpactIfNone: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": {}, + "optional": {"signal": (any_typ,), "any_input": (any_typ,), } + } + + RETURN_TYPES = (any_typ, "BOOLEAN") + RETURN_NAMES = ("signal_opt", "bool") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + def doit(self, signal=None, any_input=None): + if any_input is None: + return (signal, False, ) + else: + return (signal, True, ) + + +class ImpactLogicalOperators: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "operator": (['and', 'or', 'xor'],), + "bool_a": ("BOOLEAN", {"forceInput": True}), + "bool_b": ("BOOLEAN", {"forceInput": True}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, operator, bool_a, bool_b): + if operator == "and": + return (bool_a and bool_b, ) + elif operator == "or": + return (bool_a or bool_b, ) + else: + return (bool_a != bool_b, ) + + +class ImpactConditionalStopIteration: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "cond": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = 
"ImpactPack/Logic" + + RETURN_TYPES = () + + OUTPUT_NODE = True + + def doit(self, cond): + if cond: + PromptServer.instance.send_sync("stop-iteration", {}) + return {} + + +class ImpactNeg: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "value": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, value): + return (not value, ) + + +class ImpactInt: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("INT", ) + + def doit(self, value): + return (value, ) + + +class ImpactFloat: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 1.0, "min": -3.402823466e+38, "max": 3.402823466e+38}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("FLOAT", ) + + def doit(self, value): + return (value, ) + + +class ImpactValueSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": (any_typ, ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + "optional": { + "signal_opt": (any_typ,), + } + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + RETURN_NAMES = ("signal", ) + + def doit(self, value, link_id=0, signal_opt=None): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return (signal_opt, ) + + +class ImpactIntConstSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ, ), + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = () + + def doit(self, signal, value, link_id=0): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return {} + + +class ImpactValueReceiver: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "typ": (["STRING", "INT", "FLOAT", "BOOLEAN"], ), + "value": ("STRING", {"default": ""}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, typ, value, link_id=0): + if typ == "INT": + return (int(value), ) + elif typ == "FLOAT": + return (float(value), ) + elif typ == "BOOLEAN": + return (value.lower() == "true", ) + else: + return (value, ) + + +class ImpactImageInfo: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": ("IMAGE", ), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("batch", "height", "width", "channel") + + def doit(self, value): + return (value.shape[0], value.shape[1], value.shape[2], value.shape[3]) + + +class ImpactLatentInfo: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": ("LATENT", ), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("batch", "height", "width", "channel") + + def doit(self, value): + shape = value['samples'].shape + return (shape[0], shape[2] * 8, shape[3] * 8, shape[1]) + 
+ +class ImpactMinMax: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}), + "a": (any_typ,), + "b": (any_typ,), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", ) + + def doit(self, mode, a, b): + if mode: + return (max(a, b), ) + else: + return (min(a, b),) + + +class ImpactQueueTrigger: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, mode): + if(mode): + PromptServer.instance.send_sync("impact-add-queue", {}) + + return (signal,) + + +class ImpactQueueTriggerCountdown: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "count": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "total": ("INT", {"default": 10, "min": 1, "max": 0xffffffffffffffff}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}), + }, + "optional": {"signal": (any_typ,),}, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ, "INT", "INT") + RETURN_NAMES = ("signal_opt", "count", "total") + OUTPUT_NODE = True + + def doit(self, count, total, mode, unique_id, signal=None): + if (mode): + if count < total - 1: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": unique_id, "widget_name": "count", "type": "int", "value": count+1}) + PromptServer.instance.send_sync("impact-add-queue", {}) + if count >= total - 1: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": unique_id, "widget_name": "count", "type": "int", "value": 0}) + + return (signal, count, total) + + + +class ImpactSetWidgetValue: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "widget_name": ("STRING", {"multiline": False}), + }, + "optional": { + "boolean_value": ("BOOLEAN", {"forceInput": True}), + "int_value": ("INT", {"forceInput": True}), + "float_value": ("FLOAT", {"forceInput": True}), + "string_value": ("STRING", {"forceInput": True}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, node_id, widget_name, boolean_value=None, int_value=None, float_value=None, string_value=None, ): + kind = None + if boolean_value is not None: + value = boolean_value + kind = "BOOLEAN" + elif int_value is not None: + value = int_value + kind = "INT" + elif float_value is not None: + value = float_value + kind = "FLOAT" + elif string_value is not None: + value = string_value + kind = "STRING" + else: + value = None + + if value is not None: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": node_id, "widget_name": widget_name, "type": kind, "value": value}) + + return (signal,) + + +class ImpactNodeSetMuteState: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "set_state": ("BOOLEAN", {"default": True, "label_on": "active", "label_off": "mute"}), + } 
+                }
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Logic/_for_test"
+    RETURN_TYPES = (any_typ,)
+    RETURN_NAMES = ("signal_opt",)
+    OUTPUT_NODE = True
+
+    def doit(self, signal, node_id, set_state):
+        PromptServer.instance.send_sync("impact-node-mute-state", {"node_id": node_id, "is_active": set_state})
+        return (signal,)
+
+
+class ImpactSleep:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "signal": (any_typ,),
+                    "seconds": ("FLOAT", {"default": 0.5, "min": 0, "max": 3600}),
+                    }
+                }
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Logic/_for_test"
+    RETURN_TYPES = (any_typ,)
+    RETURN_NAMES = ("signal_opt",)
+    OUTPUT_NODE = True
+
+    def doit(self, signal, seconds):
+        time.sleep(seconds)
+        return (signal,)
+
+
+error_skip_flag = False
+try:
+    import cm_global
+
+    # parameter renamed from `str` to `msg` so the builtin is not shadowed
+    def filter_message(msg):
+        global error_skip_flag
+
+        if "IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE" in msg:
+            return True
+        elif error_skip_flag and "ERROR:root:!!! Exception during processing !!!\n" == msg:
+            error_skip_flag = False
+            return True
+        else:
+            return False
+
+    cm_global.try_call(api='cm.register_message_collapse', f=filter_message)
+
+except Exception:
+    print("[WARN] ComfyUI-Impact-Pack: the installed `ComfyUI` or `ComfyUI-Manager` is an outdated version.")
+
+
+def workflow_to_map(workflow):
+    nodes = {}
+    links = {}
+    for link in workflow['links']:
+        links[link[0]] = link[1:]
+    for node in workflow['nodes']:
+        nodes[str(node['id'])] = node
+
+    return nodes, links
+
+
+class ImpactRemoteBoolean:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "widget_name": ("STRING", {"multiline": False}),
+                    "value": ("BOOLEAN", {"default": True, "label_on": "True", "label_off": "False"}),
+                    }}
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Logic/_for_test"
+    RETURN_TYPES = ()
+    OUTPUT_NODE = True
+
+    def doit(self, **kwargs):
+        return {}
+
+
+class ImpactRemoteInt:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "widget_name": ("STRING", {"multiline": False}),
+                    "value": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff}),
+                    }}
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Logic/_for_test"
+    RETURN_TYPES = ()
+    OUTPUT_NODE = True
+
+    def doit(self, **kwargs):
+        return {}
+
+
+class ImpactControlBridge:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "value": (any_typ,),
+                    "mode": ("BOOLEAN", {"default": True, "label_on": "Active", "label_off": "Mute/Bypass"}),
+                    "behavior": ("BOOLEAN", {"default": True, "label_on": "Mute", "label_off": "Bypass"}),
+                    },
+                "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}
+                }
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Logic/_for_test"
+    RETURN_TYPES = (any_typ,)
+    RETURN_NAMES = ("value",)
+    OUTPUT_NODE = True
+
+    @classmethod
+    def IS_CHANGED(self, value, mode, behavior=True, unique_id=None, prompt=None, extra_pnginfo=None):
+        # NOTE: extra_pnginfo is not populated for IS_CHANGED.
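+        # (The workflow structure assumed here is the standard ComfyUI/LiteGraph
+        #  serialization: each entry of workflow['links'] is
+        #  [link_id, src_node_id, src_slot, dst_node_id, dst_slot, type], and
+        #  workflow_to_map() keys links by link_id, so links[link][2] below is the
+        #  destination node id of that link.)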
+        # So extra_pnginfo is useless here; read the workflow from the current prompt instead.
+        try:
+            workflow = core.current_prompt['extra_data']['extra_pnginfo']['workflow']
+        except Exception:
+            print("[Impact Pack] ImpactControlBridge: failed to read core.current_prompt['extra_data']['extra_pnginfo']['workflow']")
+            return 0
+
+        nodes, links = workflow_to_map(workflow)
+        next_nodes = []
+
+        for link in nodes[unique_id]['outputs'][0]['links']:
+            node_id = str(links[link][2])
+            impact.utils.collect_non_reroute_nodes(nodes, links, next_nodes, node_id)
+
+        return next_nodes
+
+    def doit(self, value, mode, behavior=True, unique_id=None, prompt=None, extra_pnginfo=None):
+        global error_skip_flag
+
+        workflow_nodes, links = workflow_to_map(extra_pnginfo['workflow'])
+
+        active_nodes = []
+        mute_nodes = []
+        bypass_nodes = []
+
+        for link in workflow_nodes[unique_id]['outputs'][0]['links']:
+            node_id = str(links[link][2])
+
+            next_nodes = []
+            impact.utils.collect_non_reroute_nodes(workflow_nodes, links, next_nodes, node_id)
+
+            for next_node_id in next_nodes:
+                # LiteGraph node modes: 0 = active, 2 = muted, 4 = bypassed
+                node_mode = workflow_nodes[next_node_id]['mode']
+
+                if node_mode == 0:
+                    active_nodes.append(next_node_id)
+                elif node_mode == 2:
+                    mute_nodes.append(next_node_id)
+                elif node_mode == 4:
+                    bypass_nodes.append(next_node_id)
+
+        if mode:
+            # active
+            should_be_active_nodes = mute_nodes + bypass_nodes
+            if len(should_be_active_nodes) > 0:
+                PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'actives': list(should_be_active_nodes)})
+                nodes.interrupt_processing()
+
+        elif behavior:
+            # mute
+            should_be_mute_nodes = active_nodes + bypass_nodes
+            if len(should_be_mute_nodes) > 0:
+                PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'mutes': list(should_be_mute_nodes)})
+                nodes.interrupt_processing()
+
+        else:
+            # bypass
+            should_be_bypass_nodes = active_nodes + mute_nodes
+            if len(should_be_bypass_nodes) > 0:
+                PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'bypasses': list(should_be_bypass_nodes)})
+                nodes.interrupt_processing()
+
+        return (value, )
+
+
+original_handle_execution = execution.PromptExecutor.handle_execution_error
+
+
+def handle_execution_error(*args, **kwargs):
+    # Delegate to the saved original; calling execution.PromptExecutor.handle_execution_error
+    # directly would recurse once this wrapper is installed in its place.
+    original_handle_execution(*args, **kwargs)
+
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..81b206d549641d05706116dc8f0cd863e98346ce
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py
@@ -0,0 +1,219 @@
+import folder_paths
+from impact.core import *
+import os
+
+import mmcv
+from mmdet.apis import (inference_detector, init_detector)
+from mmdet.evaluation import get_classes
+
+
+def load_mmdet(model_path):
+    model_config = os.path.splitext(model_path)[0] + ".py"
+    model = init_detector(model_config, model_path, device="cpu")
+    return model
+
+
+def inference_segm_old(model, image, conf_threshold):
+    image = image.numpy()[0] * 255
+    mmdet_results = inference_detector(model, image)
+
+    bbox_results, segm_results = mmdet_results
+    label = "A"
+
+    classes = get_classes("coco")
+    labels = [
+        np.full(bbox.shape[0], i, dtype=np.int32)
+        for i, bbox in enumerate(bbox_results)
+    ]
+    n, m = bbox_results[0].shape
+    if n == 0:
+        return [[], [], []]
+    labels = np.concatenate(labels)
+    bboxes = np.vstack(bbox_results)
+    segms = mmcv.concat_list(segm_results)
+    filter_idxs = np.where(bboxes[:, -1] > conf_threshold)[0]
+    results = [[], [], []]
+    for i in
filter_idxs: + results[0].append(label + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + + return results + + +def inference_segm(image, modelname, conf_thres, lab="A"): + image = image.numpy()[0] * 255 + mmdet_results = inference_detector(modelname, image).pred_instances + bboxes = mmdet_results.bboxes.numpy() + segms = mmdet_results.masks.numpy() + scores = mmdet_results.scores.numpy() + + classes = get_classes("coco") + + n, m = bboxes.shape + if n == 0: + return [[], [], [], []] + labels = mmdet_results.labels + filter_inds = np.where(mmdet_results.scores > conf_thres)[0] + results = [[], [], [], []] + for i in filter_inds: + results[0].append(lab + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +def inference_bbox(modelname, image, conf_threshold): + image = image.numpy()[0] * 255 + label = "A" + output = inference_detector(modelname, image).pred_instances + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY) + + segms = [] + for x0, y0, x1, y1 in output.bboxes: + cv2_mask = np.zeros(cv2_gray.shape, np.uint8) + cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1) + cv2_mask_bool = cv2_mask.astype(bool) + segms.append(cv2_mask_bool) + + n, m = output.bboxes.shape + if n == 0: + return [[], [], [], []] + + bboxes = output.bboxes.numpy() + scores = output.scores.numpy() + filter_idxs = np.where(scores > conf_threshold)[0] + results = [[], [], [], []] + for i in filter_idxs: + results[0].append(label) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +class BBoxDetector: + bbox_model = None + + def __init__(self, bbox_model): + self.bbox_model = bbox_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + mmdet_results = inference_bbox(self.bbox_model, image, threshold) + segmasks = create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + + items.append(item) + + shape = image.shape[1], image.shape[2] + return shape, items + + def detect_combined(self, image, threshold, dilation): + mmdet_results = inference_bbox(self.bbox_model, image, threshold) + segmasks = create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + return combine_masks(segmasks) + + def setAux(self, x): + pass + + +class SegmDetector(BBoxDetector): + segm_model = None + + def __init__(self, segm_model): + self.segm_model = segm_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + mmdet_results = inference_segm(image, self.segm_model, threshold) + segmasks = 
create_segmasks(mmdet_results)
+
+        if dilation > 0:
+            segmasks = dilate_masks(segmasks, dilation)
+
+        items = []
+        h = image.shape[1]
+        w = image.shape[2]
+        for x in segmasks:
+            item_bbox = x[0]
+            item_mask = x[1]
+
+            y1, x1, y2, x2 = item_bbox
+
+            if x2 - x1 > drop_size and y2 - y1 > drop_size:  # minimum dimension must be (2,2) to avoid squeeze issue
+                crop_region = make_crop_region(w, h, item_bbox, crop_factor)
+                cropped_image = crop_image(image, crop_region)
+                cropped_mask = crop_ndarray2(item_mask, crop_region)
+                confidence = x[2]
+
+                item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None)
+                items.append(item)
+
+        segs = image.shape, items
+
+        if detailer_hook is not None and hasattr(detailer_hook, "post_detection"):
+            segs = detailer_hook.post_detection(segs)
+
+        return segs
+
+    def detect_combined(self, image, threshold, dilation):
+        # NOTE: must use the segm model here; self.bbox_model is always None on SegmDetector
+        mmdet_results = inference_segm(image, self.segm_model, threshold)
+        segmasks = create_segmasks(mmdet_results)
+        if dilation > 0:
+            segmasks = dilate_masks(segmasks, dilation)
+
+        return combine_masks(segmasks)
+
+    def setAux(self, x):
+        pass
+
+
+class MMDetDetectorProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("mmdets_bbox")]
+        segms = ["segm/"+x for x in folder_paths.get_filename_list("mmdets_segm")]
+        return {"required": {"model_name": (bboxs + segms, )}}
+    RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR")
+    FUNCTION = "load_mmdet"
+
+    CATEGORY = "ImpactPack"
+
+    def load_mmdet(self, model_name):
+        mmdet_path = folder_paths.get_full_path("mmdets", model_name)
+        model = load_mmdet(mmdet_path)
+
+        if model_name.startswith("bbox"):
+            return BBoxDetector(model), NO_SEGM_DETECTOR()
+        else:
+            # wrap the raw model so the second output matches the SEGM_DETECTOR interface
+            return NO_BBOX_DETECTOR(), SegmDetector(model)
\ No newline at end of file
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad0055aef7ad55ad53b5cace1940476f4e2dff50
--- /dev/null
+++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py
@@ -0,0 +1,38 @@
+import impact.additional_dependencies
+from impact.utils import *
+
+impact.additional_dependencies.ensure_onnx_package()
+
+try:
+    import onnxruntime
+
+    def onnx_inference(image, onnx_model):
+        # prepare image
+        pil = tensor2pil(image)
+        image = np.ascontiguousarray(pil)
+        image = image[:, :, ::-1]  # to BGR image
+        image = image.astype(np.float32)
+        image -= [103.939, 116.779, 123.68]  # 'caffe' mode image preprocessing
+
+        # do detection
+        onnx_model = onnxruntime.InferenceSession(onnx_model, providers=["CPUExecutionProvider"])
+        outputs = onnx_model.run(
+            [s_i.name for s_i in onnx_model.get_outputs()],
+            {onnx_model.get_inputs()[0].name: np.expand_dims(image, axis=0)},
+        )
+
+        labels = [op for op in outputs if op.dtype == "int32"][0]
+        scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]
+        boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]
+
+        # filter out padding items (entries with label -1 mark the end of valid detections)
+        idx = np.where(labels[0] == -1)[0][0]
+
+        labels = labels[0][:idx]
+        scores = scores[0][:idx]
+        boxes = boxes[0][:idx].astype(np.uint32)
+
+        return labels, scores, boxes
+except Exception as e:
+    print("[ERROR] ComfyUI-Impact-Pack: the 'onnxruntime' package doesn't support Python 3.11 yet.")
+    print(f"\t{e}")
diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py
new file mode
100644 index 0000000000000000000000000000000000000000..ed0af73efb3054005ccd38e6f456192868897ec2 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py @@ -0,0 +1,422 @@ +import folder_paths +import impact.wildcards + +class ToDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"], ), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL",), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }} + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + pipe = (kwargs['model'], kwargs['clip'], kwargs['vae'], kwargs['positive'], kwargs['negative'], kwargs['wildcard'], kwargs['bbox_detector'], + kwargs.get('segm_detector_opt', None), kwargs.get('sam_model_opt', None), kwargs.get('detailer_hook', None), + kwargs.get('refiner_model', None), kwargs.get('refiner_clip', None), + kwargs.get('refiner_positive', None), kwargs.get('refiner_negative', None)) + return (pipe, ) + + +class ToDetailerPipeSDXL(ToDetailerPipe): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "refiner_model": ("MODEL",), + "refiner_clip": ("CLIP",), + "refiner_positive": ("CONDITIONING",), + "refiner_negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL",), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }} + + +class FromDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK") + RETURN_NAMES = ("model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe + return model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + 
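+    # (For reference, inferred from the unpacking in doit below: a DETAILER_PIPE is the
+    #  14-tuple (model, clip, vae, positive, negative, wildcard, bbox_detector,
+    #  segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip,
+    #  refiner_positive, refiner_negative).)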
def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_SDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK", "MODEL", "CLIP", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + + +class ToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, model, clip, vae, positive, negative): + pipe = (model, clip, vae, positive, negative) + return (pipe, ) + + +class FromBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return model, clip, vae, positive, negative + + +class FromBasicPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("BASIC_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("basic_pipe", "model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return basic_pipe, model, clip, vae, positive, negative + + +class BasicPipeToDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + basic_pipe = kwargs['basic_pipe'] + bbox_detector = 
kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None + return (pipe, ) + + +class BasicPipeToDetailerPipeSDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"base_basic_pipe": ("BASIC_PIPE",), + "refiner_basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + base_basic_pipe = kwargs['base_basic_pipe'] + refiner_basic_pipe = kwargs['refiner_basic_pipe'] + bbox_detector = kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = base_basic_pipe + refiner_model, refiner_clip, refiner_vae, refiner_positive, refiner_negative = refiner_basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + return (pipe, ) + + +class DetailerPipeToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }} + + RETURN_TYPES = ("BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("base_basic_pipe", "refiner_basic_pipe") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, _, _, _, _, _, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + pipe = model, clip, vae, positive, negative + refiner_pipe = refiner_model, refiner_clip, vae, refiner_positive, refiner_negative + return (pipe, refiner_pipe) + + +class EditBasicPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": {"basic_pipe": ("BASIC_PIPE",), }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe, model=None, clip=None, vae=None, positive=None, negative=None): + res_model, res_clip, res_vae, res_positive, res_negative = basic_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + pipe = res_model, res_clip, res_vae, res_positive, res_negative + + return (pipe, ) + + +class EditDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": 
("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + "segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE",) + RETURN_NAMES = ("detailer_pipe",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + detailer_pipe = kwargs['detailer_pipe'] + wildcard = kwargs['wildcard'] + model = kwargs.get('model', None) + clip = kwargs.get('clip', None) + vae = kwargs.get('vae', None) + positive = kwargs.get('positive', None) + negative = kwargs.get('negative', None) + bbox_detector = kwargs.get('bbox_detector', None) + sam_model = kwargs.get('sam_model', None) + segm_detector = kwargs.get('segm_detector', None) + detailer_hook = kwargs.get('detailer_hook', None) + refiner_model = kwargs.get('refiner_model', None) + refiner_clip = kwargs.get('refiner_clip', None) + refiner_positive = kwargs.get('refiner_positive', None) + refiner_negative = kwargs.get('refiner_negative', None) + + res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, res_refiner_model, res_refiner_clip, res_refiner_positive, res_refiner_negative = detailer_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + if bbox_detector is not None: + res_bbox_detector = bbox_detector + + if segm_detector is not None: + res_segm_detector = segm_detector + + if wildcard != "": + res_wildcard = wildcard + + if sam_model is not None: + res_sam_model = sam_model + + if detailer_hook is not None: + res_detailer_hook = detailer_hook + + if refiner_model is not None: + res_refiner_model = refiner_model + + if refiner_clip is not None: + res_refiner_clip = refiner_clip + + if refiner_positive is not None: + res_refiner_positive = refiner_positive + + if refiner_negative is not None: + res_refiner_negative = refiner_negative + + pipe = (res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, + res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, + res_refiner_model, res_refiner_clip, res_refiner_positive, res_refiner_negative) + + return (pipe, ) + + +class EditDetailerPipeSDXL(EditDetailerPipe): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": ("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "refiner_model": ("MODEL",), + "refiner_clip": ("CLIP",), + "refiner_positive": ("CONDITIONING",), + "refiner_negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + 
"segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..484c12998fad0e8e60c050420ce63227af44ba2f --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py @@ -0,0 +1,25 @@ +import comfy.sample +import traceback + +original_sample = comfy.sample.sample + + +def informative_sample(*args, **kwargs): + try: + return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations. + except RuntimeError as e: + is_model_mix_issue = False + try: + if 'mat1 and mat2 shapes cannot be multiplied' in e.args[0]: + if 'torch.nn.functional.linear' in traceback.format_exc().strip().split('\n')[-3]: + is_model_mix_issue = True + except: + pass + + if is_model_mix_issue: + raise RuntimeError("\n\n#### It seems that models and clips are mixed and interconnected between SDXL Base, SDXL Refiner, SD1.x, and SD2.x. Please verify. ####\n\n") + else: + raise e + + +comfy.sample.sample = informative_sample diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..9bcd468c1d356ed489457c4eb07138b69a0491f0 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py @@ -0,0 +1,1845 @@ +import os +import sys + +import impact.impact_server +from nodes import MAX_RESOLUTION + +from impact.utils import * +from . import core +from .core import SEG +import impact.utils as utils +from . import defs +from . 
import segs_upscaler +from comfy.cli_args import args +import math + + +class SEGSDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("SEGS", "IMAGE") + RETURN_NAMES = ("segs", "cnet_images") + OUTPUT_IS_LIST = (False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1, + refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image.shape) + + new_segs = [] + cnet_pil_list = [] + + for i in range(batch_size): + seed += 1 + for seg in segs[1]: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else crop_ndarray4(image.numpy(), seg.crop_region) + cropped_image = to_tensor(cropped_image) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + new_segs.append(seg) + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + + cropped_negative = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + + enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + cropped_positive, cropped_negative, denoise, 
cropped_mask, force_inpaint, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + control_net_wrapper=seg.control_net_wrapper, cycle=cycle, + inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt) + + if cnet_pils is not None: + cnet_pil_list.extend(cnet_pils) + + if enhanced_image is None: + new_cropped_image = cropped_image + else: + new_cropped_image = enhanced_image + + new_seg = SEG(to_numpy(new_cropped_image), seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs), cnet_pil_list + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1, + refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SEGSDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + segs, cnet_pil_list = SEGSDetailer.do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio, batch_size, cycle=cycle, + refiner_basic_pipe_opt=refiner_basic_pipe_opt, + inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + # set fallback image + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return segs, cnet_pil_list + + +class SEGSPaste: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "alpha": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + }, + "optional": {"ref_image_opt": ("IMAGE", ), } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def doit(image, segs, feather, alpha=255, ref_image_opt=None): + + segs = core.segs_scale_match(segs, image.shape) + + result = None + for i, single_image in enumerate(image): + image_i = single_image.unsqueeze(0).clone() + + for seg in segs[1]: + ref_image = None + if ref_image_opt is None and seg.cropped_image is not None: + cropped_image = seg.cropped_image + if isinstance(cropped_image, np.ndarray): + cropped_image = torch.from_numpy(cropped_image) + ref_image = cropped_image[i].unsqueeze(0) + elif ref_image_opt is not None: + ref_tensor = ref_image_opt[i].unsqueeze(0) + ref_image = crop_image(ref_tensor, seg.crop_region) + if ref_image is not None: + if seg.cropped_mask.ndim == 3 and len(seg.cropped_mask) == len(image): + mask = seg.cropped_mask[i] + elif seg.cropped_mask.ndim == 3 and len(seg.cropped_mask) > 1: + print(f"[Impact Pack] WARN: SEGSPaste - The number of the mask batch({len(seg.cropped_mask)}) and the image batch({len(image)}) are different. 
Combine the mask frames and apply.") + combined_mask = (seg.cropped_mask[0] * 255).to(torch.uint8) + + for frame_mask in seg.cropped_mask[1:]: + combined_mask |= (frame_mask * 255).to(torch.uint8) + + combined_mask = (combined_mask/255.0).to(torch.float32) + mask = utils.to_binary_mask(combined_mask, 0.1) + else: # ndim == 2 + mask = seg.cropped_mask + + mask = tensor_gaussian_blur_mask(mask, feather) * (alpha/255) + x, y, *_ = seg.crop_region + + # ensure same device + mask = mask.to(image_i.device) + ref_image = ref_image.to(image_i.device) + + tensor_paste(image_i, ref_image, (x, y), mask) + + if result is None: + result = image_i + else: + result = torch.concat((result, image_i), dim=0) + + if not args.highvram and not args.gpu_only: + result = result.cpu() + + return (result, ) + + +class SEGSPreviewCNet: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": {"segs": ("SEGS", ),}, } + + RETURN_TYPES = ("IMAGE", ) + OUTPUT_IS_LIST = (True, ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + OUTPUT_NODE = True + + def doit(self, segs): + full_output_folder, filename, counter, subfolder, filename_prefix = \ + folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0]) + + results = list() + result_image_list = [] + + for seg in segs[1]: + file = f"{filename}_{counter:05}_.webp" + + if seg.control_net_wrapper is not None and seg.control_net_wrapper.control_image is not None: + cnet_image = seg.control_net_wrapper.control_image + result_image_list.append(cnet_image) + else: + cnet_image = empty_pil_tensor(64, 64) + + cnet_pil = utils.tensor2pil(cnet_image) + cnet_pil.save(os.path.join(full_output_folder, file)) + + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + return {"ui": {"images": results}, "result": (result_image_list,)} + + +class SEGSPreview: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "alpha_mode": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "min_alpha": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE", ) + OUTPUT_IS_LIST = (True, ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + OUTPUT_NODE = True + + def doit(self, segs, alpha_mode=True, min_alpha=0.0, fallback_image_opt=None): + full_output_folder, filename, counter, subfolder, filename_prefix = \ + folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0]) + + results = list() + result_image_list = [] + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + if min_alpha != 0: + min_alpha = int(255 * min_alpha) + + if len(segs[1]) > 0: + if segs[1][0].cropped_image is not None: + batch_count = len(segs[1][0].cropped_image) + elif fallback_image_opt is not None: + batch_count = len(fallback_image_opt) + else: + return {"ui": {"images": results}} + + for seg in segs[1]: + result_image_batch = None + cached_mask = None + + def get_combined_mask(): + nonlocal cached_mask + + if cached_mask is not None: + return cached_mask + else: + if isinstance(seg.cropped_mask, np.ndarray): + masks = torch.tensor(seg.cropped_mask) + else: + masks = seg.cropped_mask 
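+                    # (Combining strategy, mirroring the frame-combining fallback in
+                    #  SEGSPaste above: promote each frame to a uint8 {0, 255} mask,
+                    #  bitwise-OR all frames together, then scale back to float and
+                    #  binarize with a 0.1 threshold.)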
+ + cached_mask = (masks[0] * 255).to(torch.uint8) + for x in masks[1:]: + cached_mask |= (x * 255).to(torch.uint8) + cached_mask = (cached_mask/255.0).to(torch.float32) + cached_mask = utils.to_binary_mask(cached_mask, 0.1) + cached_mask = cached_mask.numpy() + + return cached_mask + + def stack_image(image, mask=None): + nonlocal result_image_batch + + if isinstance(image, np.ndarray): + image = torch.from_numpy(image) + + if mask is not None: + image *= torch.tensor(mask)[None, ..., None] + + if result_image_batch is None: + result_image_batch = image + else: + result_image_batch = torch.concat((result_image_batch, image), dim=0) + + for i in range(batch_count): + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image[i, None] + elif fallback_image_opt is not None: + # take from original image + ref_image = fallback_image_opt[i].unsqueeze(0) + cropped_image = crop_image(ref_image, seg.crop_region) + + if cropped_image is not None: + if isinstance(cropped_image, np.ndarray): + cropped_image = torch.from_numpy(cropped_image) + + cropped_image = cropped_image.clone() + cropped_pil = to_pil(cropped_image) + + if alpha_mode: + if isinstance(seg.cropped_mask, np.ndarray): + cropped_mask = seg.cropped_mask + else: + if seg.cropped_image is not None and len(seg.cropped_image) != len(seg.cropped_mask): + cropped_mask = get_combined_mask() + else: + cropped_mask = seg.cropped_mask[i].numpy() + + mask_array = (cropped_mask * 255).astype(np.uint8) + + if min_alpha != 0: + mask_array[mask_array < min_alpha] = min_alpha + + mask_pil = Image.fromarray(mask_array, mode='L').resize(cropped_pil.size) + cropped_pil.putalpha(mask_pil) + stack_image(cropped_image, cropped_mask) + else: + stack_image(cropped_image) + + file = f"{filename}_{counter:05}_.webp" + cropped_pil.save(os.path.join(full_output_folder, file)) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + if result_image_batch is not None: + result_image_list.append(result_image_batch) + + return {"ui": {"images": results}, "result": (result_image_list,) } + + +class SEGSLabelFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "preset": (['all'] + defs.detection_labels, ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def filter(segs, labels): + labels = set([label.strip() for label in labels]) + + if 'all' in labels: + return (segs, (segs[0], []), ) + else: + res_segs = [] + remained_segs = [] + + for x in segs[1]: + if x.label in labels: + res_segs.append(x) + elif 'eyes' in labels and x.label in ['left_eye', 'right_eye']: + res_segs.append(x) + elif 'eyebrows' in labels and x.label in ['left_eyebrow', 'right_eyebrow']: + res_segs.append(x) + elif 'pupils' in labels and x.label in ['left_pupil', 'right_pupil']: + res_segs.append(x) + else: + remained_segs.append(x) + + return ((segs[0], res_segs), (segs[0], remained_segs), ) + + def doit(self, segs, preset, labels): + labels = labels.split(',') + return SEGSLabelFilter.filter(segs, labels) + + +class SEGSLabelAssign: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the label to be assigned in order of segs, 
separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS",) + RETURN_NAMES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def assign(segs, labels): + labels = [label.strip() for label in labels] + + if len(labels) != len(segs[1]): + print(f'Warning (SEGSLabelAssign): length of labels ({len(labels)}) != length of segs ({len(segs[1])})') + + labeled_segs = [] + + idx = 0 + for x in segs[1]: + if len(labels) > idx: + x = x._replace(label=labels[idx]) + labeled_segs.append(x) + idx += 1 + + return ((segs[0], labeled_segs), ) + + def doit(self, segs, labels): + labels = labels.split(',') + return SEGSLabelAssign.assign(segs, labels) + + +class SEGSOrderedFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "confidence"],), + "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}), + "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, order, take_start, take_count): + segs_with_order = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + elif target == "y2": + value = y2 + elif target == "confidence": + value = seg.confidence + else: + raise Exception(f"[Impact Pack] SEGSOrderedFilter - Unexpected target '{target}'") + + segs_with_order.append((value, seg)) + + if order: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=True) + else: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=False) + + result_list = [] + remained_list = [] + + for i, item in enumerate(sorted_list): + if take_start <= i < take_start + take_count: + result_list.append(item[1]) + else: + remained_list.append(item[1]) + + return (segs[0], result_list), (segs[0], remained_list), + + +class SEGSRangeFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent", "confidence(0-100)"],), + "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}), + "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, mode, min_value, max_value): + new_segs = [] + remained_segs = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "length_percent": + h = y2 - y1 + w = x2 - x1 + value = max(h/w, w/h)*100 + print(f"value={value}") + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 
+ elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + elif target == "y2": + value = y2 + elif target == "confidence(0-100)": + value = seg.confidence*100 + else: + raise Exception(f"[Impact Pack] SEGSRangeFilter - Unexpected target '{target}'") + + if mode and min_value <= value <= max_value: + print(f"[in] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + elif not mode and (value < min_value or value > max_value): + print(f"[out] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + else: + remained_segs.append(seg) + print(f"[filter] value={value} / {mode}, {min_value}, {max_value}") + + return (segs[0], new_segs), (segs[0], remained_segs), + + +class SEGSToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, fallback_image_opt=None): + results = list() + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = to_tensor(seg.cropped_image) + elif fallback_image_opt is not None: + # take from original image + cropped_image = to_tensor(crop_image(fallback_image_opt, seg.crop_region)) + else: + cropped_image = empty_pil_tensor() + + results.append(cropped_image) + + if len(results) == 0: + results.append(empty_pil_tensor()) + + return (results,) + + +class SEGSToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + if len(masks) == 0: + empty_mask = torch.zeros(segs[0], dtype=torch.float32, device="cpu") + masks = [empty_mask] + masks = [utils.make_3d_mask(mask) for mask in masks] + return (masks,) + + +class SEGSToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + masks = [utils.make_3d_mask(mask) for mask in masks] + mask_batch = torch.concat(masks) + return (mask_batch,) + + +class SEGSConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs1": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + dim = None + res = None + + for k, v in list(kwargs.items()): + if v[0] == (0, 0) or len(v[1]) == 0: + continue + + if dim is None: + dim = v[0] + res = v[1] + else: + if v[0] == dim: + res = res + v[1] + else: + print(f"ERROR: source shape of 'segs1'{dim} and '{k}'{v[0]} are different. 
'{k}' will be ignored") + + if dim is None: + empty_segs = ((0, 0), []) + return (empty_segs, ) + else: + return ((dim, res), ) + + +class Count_Elts_in_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("INT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + return (len(segs[1]), ) + + +class DecomposeSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS_HEADER", "SEG_ELT",) + OUTPUT_IS_LIST = (False, True, ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + return segs + + +class AssembleSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_header": ("SEGS_HEADER", ), + "seg_elt": ("SEG_ELT", ), + }, + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("SEGS", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_header, seg_elt): + return ((seg_header[0], seg_elt), ) + + +class From_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + } + + RETURN_TYPES = ("SEG_ELT", "IMAGE", "MASK", "SEG_ELT_crop_region", "SEG_ELT_bbox", "SEG_ELT_control_net_wrapper", "FLOAT", "STRING") + RETURN_NAMES = ("seg_elt", "cropped_image", "cropped_mask", "crop_region", "bbox", "control_net_wrapper", "confidence", "label") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt): + cropped_image = to_tensor(seg_elt.cropped_image) if seg_elt.cropped_image is not None else None + return (seg_elt, cropped_image, to_tensor(seg_elt.cropped_mask), seg_elt.crop_region, seg_elt.bbox, seg_elt.control_net_wrapper, seg_elt.confidence, seg_elt.label,) + + +class From_SEG_ELT_bbox: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox": ("SEG_ELT_bbox", ), + }, + } + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("left", "top", "right", "bottom") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, bbox): + return bbox + + +class From_SEG_ELT_crop_region: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "crop_region": ("SEG_ELT_crop_region", ), + }, + } + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("left", "top", "right", "bottom") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, crop_region): + return crop_region + + +class Edit_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + "optional": { + "cropped_image_opt": ("IMAGE", ), + "cropped_mask_opt": ("MASK", ), + "crop_region_opt": ("SEG_ELT_crop_region", ), + "bbox_opt": ("SEG_ELT_bbox", ), + "control_net_wrapper_opt": ("SEG_ELT_control_net_wrapper", ), + "confidence_opt": ("FLOAT", {"min": 0, "max": 1.0, "step": 0.1, "forceInput": True}), + "label_opt": ("STRING", {"multiline": False, "forceInput": True}), + } + } + + RETURN_TYPES = ("SEG_ELT", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt, cropped_image_opt=None, cropped_mask_opt=None, confidence_opt=None, crop_region_opt=None, + bbox_opt=None, label_opt=None, control_net_wrapper_opt=None): + + cropped_image = seg_elt.cropped_image if cropped_image_opt is None else cropped_image_opt + cropped_mask = seg_elt.cropped_mask if cropped_mask_opt is None else cropped_mask_opt + confidence = seg_elt.confidence if confidence_opt is None else confidence_opt + crop_region = seg_elt.crop_region if crop_region_opt 
is None else crop_region_opt
+        bbox = seg_elt.bbox if bbox_opt is None else bbox_opt
+        label = seg_elt.label if label_opt is None else label_opt
+        control_net_wrapper = seg_elt.control_net_wrapper if control_net_wrapper_opt is None else control_net_wrapper_opt
+
+        cropped_image = cropped_image.numpy() if cropped_image is not None else None
+
+        if isinstance(cropped_mask, torch.Tensor):
+            if len(cropped_mask.shape) == 3:
+                cropped_mask = cropped_mask.squeeze(0)
+
+            cropped_mask = cropped_mask.numpy()
+
+        seg = SEG(cropped_image, cropped_mask, confidence, crop_region, bbox, label, control_net_wrapper)
+
+        return (seg,)
+
+
+class DilateMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                     "mask": ("MASK", ),
+                     "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+                }}
+
+    RETURN_TYPES = ("MASK", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, mask, dilation):
+        mask = core.dilate_mask(mask.numpy(), dilation)
+        mask = torch.from_numpy(mask)
+        mask = utils.make_3d_mask(mask)
+        return (mask, )
+
+
+class GaussianBlurMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                     "mask": ("MASK", ),
+                     "kernel_size": ("INT", {"default": 10, "min": 0, "max": 100, "step": 1}),
+                     "sigma": ("FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}),
+                }}
+
+    RETURN_TYPES = ("MASK", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, mask, kernel_size, sigma):
+        # Some custom nodes produce non-standard 4-dimensional masks in (b, c, h, w) layout,
+        # whereas the Impact Pack uses (b, h, w, c) internally. Normalize to the standard
+        # 3-dimensional mask format first, then add the trailing channel dimension that
+        # tensor_gaussian_blur_mask expects.
+        mask = make_3d_mask(mask)
+        mask = torch.unsqueeze(mask, dim=-1)
+        mask = utils.tensor_gaussian_blur_mask(mask, kernel_size, sigma)
+        mask = torch.squeeze(mask, dim=-1)
+        return (mask, )
+
+
+class DilateMaskInSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                     "segs": ("SEGS", ),
+                     "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+                }}
+
+    RETURN_TYPES = ("SEGS", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, segs, dilation):
+        new_segs = []
+        for seg in segs[1]:
+            mask = core.dilate_mask(seg.cropped_mask, dilation)
+            seg = SEG(seg.cropped_image, mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(seg)
+
+        return ((segs[0], new_segs), )
+
+
+class GaussianBlurMaskInSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                     "segs": ("SEGS", ),
+                     "kernel_size": ("INT", {"default": 10, "min": 0, "max": 100, "step": 1}),
+                     "sigma": ("FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}),
+                }}
+
+    RETURN_TYPES = ("SEGS", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, segs, kernel_size, sigma):
+        new_segs = []
+        for seg in segs[1]:
+            mask = utils.tensor_gaussian_blur_mask(seg.cropped_mask, kernel_size, sigma)
+            mask = torch.squeeze(mask, dim=-1).squeeze(0).numpy()
+            seg = SEG(seg.cropped_image, mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(seg)
+
+        return ((segs[0], new_segs), )
+
+
+class Dilate_SEG_ELT:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                     "seg_elt": ("SEG_ELT", ),
+                     "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+                }}
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, seg_elt, dilation):
+        # parameter name must match the input name 'seg_elt'; ComfyUI passes inputs as keyword arguments
+        mask = core.dilate_mask(seg_elt.cropped_mask, dilation)
+        seg = SEG(seg_elt.cropped_image, mask, seg_elt.confidence, seg_elt.crop_region, seg_elt.bbox, seg_elt.label, seg_elt.control_net_wrapper)
+        return (seg,)
+
+
+class SEG_ELT_BBOX_ScaleBy:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seg": ("SEG_ELT", ),
+                    "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}), }
+                }
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def fill_zero_outside_bbox(mask, crop_region, bbox):
+        cx1, cy1, _, _ = crop_region
+        x1, y1, x2, y2 = bbox
+        x1, y1, x2, y2 = x1-cx1, y1-cy1, x2-cx1, y2-cy1
+        h, w = mask.shape
+
+        x1 = min(w-1, max(0, x1))
+        x2 = min(w-1, max(0, x2))
+        y1 = min(h-1, max(0, y1))
+        y2 = min(h-1, max(0, y2))
+
+        mask_cropped = mask.copy()
+        mask_cropped[:, :x1] = 0  # zero fill left side
+        mask_cropped[:, x2:] = 0  # zero fill right side
+        mask_cropped[:y1, :] = 0  # zero fill top side
+        mask_cropped[y2:, :] = 0  # zero fill bottom side
+        return mask_cropped
+
+    def doit(self, seg, scale_by):
+        x1, y1, x2, y2 = seg.bbox
+        w = x2-x1
+        h = y2-y1
+
+        dw = int((w * scale_by - w)/2)
+        dh = int((h * scale_by - h)/2)
+
+        bbox = (x1-dw, y1-dh, x2+dw, y2+dh)
+
+        cropped_mask = SEG_ELT_BBOX_ScaleBy.fill_zero_outside_bbox(seg.cropped_mask, seg.crop_region, bbox)
+        seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, bbox, seg.label, seg.control_net_wrapper)
+        return (seg,)
+
+
+class EmptySEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {}, }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self):
+        shape = 0, 0
+        return ((shape, []),)
+
+
+class SegsToCombinedMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"segs": ("SEGS",), }}
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, segs):
+        mask = core.segs_to_combined_mask(segs)
+        mask = utils.make_3d_mask(mask)
+        return (mask,)
+
+
+class MediaPipeFaceMeshToSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        bool_true_widget = ("BOOLEAN", {"default": True, "label_on": "Enabled", "label_off": "Disabled"})
+        bool_false_widget = ("BOOLEAN", {"default": False, "label_on": "Enabled", "label_off": "Disabled"})
+        return {"required": {
+                    "image": ("IMAGE",),
+                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
+                    "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
+                    "crop_min_size": ("INT", {"min": 10, "max": MAX_RESOLUTION, "step": 1, "default": 50}),
+                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 1}),
+                    "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
+                    "face": bool_true_widget,
+                    "mouth": bool_false_widget,
+                    "left_eyebrow": bool_false_widget,
+                    "left_eye": bool_false_widget,
+                    "left_pupil": bool_false_widget,
+                    "right_eyebrow": bool_false_widget,
+                    "right_eye": bool_false_widget,
+                    "right_pupil": bool_false_widget,
+                },
+                # "optional": {"reference_image_opt": ("IMAGE", ), }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil):
+        # padding is obsolete now
+        #
https://github.com/Fannovel16/comfyui_controlnet_aux/blob/1ec41fceff1ee99596445a0c73392fd91df407dc/utils.py#L33 + # def calc_pad(h_raw, w_raw): + # resolution = normalize_size_base_64(h_raw, w_raw) + # + # def pad64(x): + # return int(np.ceil(float(x) / 64.0) * 64 - x) + # + # k = float(resolution) / float(min(h_raw, w_raw)) + # h_target = int(np.round(float(h_raw) * k)) + # w_target = int(np.round(float(w_raw) * k)) + # + # return pad64(h_target), pad64(w_target) + + # if reference_image_opt is not None: + # if image.shape[1:] != reference_image_opt.shape[1:]: + # scale_by1 = reference_image_opt.shape[1] / image.shape[1] + # scale_by2 = reference_image_opt.shape[2] / image.shape[2] + # scale_by = min(scale_by1, scale_by2) + # + # # padding is obsolete now + # # h_pad, w_pad = calc_pad(reference_image_opt.shape[1], reference_image_opt.shape[2]) + # # if h_pad != 0: + # # # height padded + # # image = image[:, :-h_pad, :, :] + # # elif w_pad != 0: + # # # width padded + # # image = image[:, :, :-w_pad, :] + # + # image = nodes.ImageScaleBy().upscale(image, "bilinear", scale_by)[0] + + result = core.mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil) + return (result, ) + + +class MaskToSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + @staticmethod + def doit(mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + mask = make_2d_mask(mask) + result = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + + return (result, ) + + +class MaskToSEGS_for_AnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + @staticmethod + def doit(mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + if (len(mask.shape) == 4 and mask.shape[1] > 1) or (len(mask.shape) == 3 and mask.shape[0] > 1): + mask = make_3d_mask(mask) + if contour_fill: + print(f"[Impact Pack] MaskToSEGS_for_AnimateDiff: 'contour_fill' is ignored because batch mask 'contour_fill' is not supported.") + result = core.batch_mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size) + return (result, ) + + mask = make_2d_mask(mask) + segs = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + all_masks = 
SEGSToMaskList().doit(segs)[0] + + result_mask = (all_masks[0] * 255).to(torch.uint8) + for mask in all_masks[1:]: + result_mask |= (mask * 255).to(torch.uint8) + + result_mask = (result_mask/255.0).to(torch.float32) + result_mask = utils.to_binary_mask(result_mask, 0.1)[0] + + return MaskToSEGS.doit(result_mask, False, crop_factor, False, drop_size, contour_fill) + + +class IPAdapterApplySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "ipadapter_pipe": ("IPADAPTER_PIPE",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "noise": ("FLOAT", {"default": 0.4, "min": 0.0, "max": 1.0, "step": 0.01}), + "weight_type": (["original", "linear", "channel penalty"], {"default": 'channel penalty'}), + "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_at": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0, "step": 0.001}), + "unfold_batch": ("BOOLEAN", {"default": False}), + "faceid_v2": ("BOOLEAN", {"default": False}), + "weight_v2": ("FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}), + "context_crop_factor": ("FLOAT", {"default": 1.2, "min": 1.0, "max": 100, "step": 0.1}), + "reference_image": ("IMAGE",), + }, + "optional": { + "combine_embeds": (["concat", "add", "subtract", "average", "norm average"],), + "neg_image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, faceid_v2, weight_v2, context_crop_factor, reference_image, combine_embeds="concat", neg_image=None): + + if len(ipadapter_pipe) == 4: + print(f"[Impact Pack] IPAdapterApplySEGS: Installed Inspire Pack is outdated.") + raise Exception("Inspire Pack is outdated.") + + new_segs = [] + + h, w = segs[0] + + if reference_image.shape[2] != w or reference_image.shape[1] != h: + reference_image = tensor_resize(reference_image, w, h) + + for seg in segs[1]: + # The context_crop_region sets how much wider the IPAdapter context will reflect compared to the crop_region, not the bbox + context_crop_region = make_crop_region(w, h, seg.crop_region, context_crop_factor) + cropped_image = crop_image(reference_image, context_crop_region) + + control_net_wrapper = core.IPAdapterWrapper(ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, weight_v2, cropped_image, neg_image=neg_image, prev_control_net=seg.control_net_wrapper, combine_embeds=combine_embeds) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetApplySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "control_net": ("CONTROL_NET",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }, + "optional": { + "segs_preprocessor": ("SEGS_PREPROCESSOR",), + "control_image": ("IMAGE",) + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, control_net, strength, segs_preprocessor=None, control_image=None): + new_segs = [] + + for seg in segs[1]: + control_net_wrapper = core.ControlNetWrapper(control_net, strength, segs_preprocessor, seg.control_net_wrapper, + original_size=segs[0], crop_region=seg.crop_region, control_image=control_image) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, 
seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetApplyAdvancedSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "control_net": ("CONTROL_NET",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) + }, + "optional": { + "segs_preprocessor": ("SEGS_PREPROCESSOR",), + "control_image": ("IMAGE",) + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, control_net, strength, start_percent, end_percent, segs_preprocessor=None, control_image=None): + new_segs = [] + + for seg in segs[1]: + control_net_wrapper = core.ControlNetAdvancedWrapper(control_net, strength, start_percent, end_percent, segs_preprocessor, + seg.control_net_wrapper, original_size=segs[0], crop_region=seg.crop_region, + control_image=control_image) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetClearSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {"segs": ("SEGS",), }, } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs): + new_segs = [] + + for seg in segs[1]: + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class SEGSSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "segs1": ("SEGS",), + }, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"segs{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + print(f"SEGSSwitch: invalid select index ('segs1' is selected)") + return (kwargs['segs1'],) + + +class SEGSPicker: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "picks": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}), + "segs": ("SEGS",), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(picks, segs, fallback_image_opt=None, unique_id=None): + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + # generate candidates image + cands = [] + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif fallback_image_opt is not None: + # take from original image + cropped_image = crop_image(fallback_image_opt, seg.crop_region) + else: + cropped_image = empty_pil_tensor() + + mask_array = seg.cropped_mask.copy() + mask_array[mask_array < 0.3] = 0.3 + mask_array = mask_array[None, ..., None] + cropped_image = cropped_image * mask_array + + cands.append(cropped_image) + + impact.impact_server.segs_picker_map[unique_id] = cands + + # pass only selected + pick_ids = set() + + for 
pick in picks.split(","):
+            try:
+                pick_ids.add(int(pick)-1)
+            except Exception:
+                pass
+
+        new_segs = []
+        for i in pick_ids:
+            if 0 <= i < len(segs[1]):
+                new_segs.append(segs[1][i])
+
+        return ((segs[0], new_segs),)
+
+
+class DefaultImageForSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "segs": ("SEGS", ),
+                    "image": ("IMAGE", ),
+                    "override": ("BOOLEAN", {"default": True}),
+                }}
+
+    RETURN_TYPES = ("SEGS", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(segs, image, override):
+        results = []
+
+        segs = core.segs_scale_match(segs, image.shape)
+
+        if len(segs[1]) > 0:
+            if segs[1][0].cropped_image is not None:
+                batch_count = len(segs[1][0].cropped_image)
+            else:
+                batch_count = len(image)
+
+            for seg in segs[1]:
+                if seg.cropped_image is not None and not override:
+                    cropped_image = seg.cropped_image
+                else:
+                    cropped_image = None
+                    for i in range(0, batch_count):
+                        # take from original image
+                        ref_image = image[i].unsqueeze(0)
+                        cropped_image2 = crop_image(ref_image, seg.crop_region)
+
+                        if cropped_image is None:
+                            cropped_image = cropped_image2
+                        else:
+                            cropped_image = torch.cat((cropped_image, cropped_image2), dim=0)
+
+                new_seg = SEG(cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+                results.append(new_seg)
+
+            return ((segs[0], results), )
+        else:
+            return (segs, )
+
+
+class RemoveImageFromSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"segs": ("SEGS", ), }}
+
+    RETURN_TYPES = ("SEGS", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(segs):
+        results = []
+
+        if len(segs[1]) > 0:
+            for seg in segs[1]:
+                new_seg = SEG(None, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+                results.append(new_seg)
+
+            return ((segs[0], results), )
+        else:
+            return (segs, )
+
+
+class MakeTileSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "images": ("IMAGE", ),
+                    "bbox_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 8}),
+                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.01}),
+                    "min_overlap": ("INT", {"default": 5, "min": 0, "max": 512, "step": 1}),
+                    "filter_segs_dilation": ("INT", {"default": 20, "min": -255, "max": 255, "step": 1}),
+                    "mask_irregularity": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                    "irregular_mask_mode": (["Reuse fast", "Reuse quality", "All random fast", "All random quality"],)
+                },
+                "optional": {
+                    "filter_in_segs_opt": ("SEGS", ),
+                    "filter_out_segs_opt": ("SEGS", ),
+                }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/__for_testing"
+
+    @staticmethod
+    def doit(images, bbox_size, crop_factor, min_overlap, filter_segs_dilation, mask_irregularity=0, irregular_mask_mode="Reuse fast", filter_in_segs_opt=None, filter_out_segs_opt=None):
+        if bbox_size <= 2*min_overlap:
+            new_min_overlap = bbox_size / 2
+            print(f"[MakeTileSEGS] min_overlap should be less than half of bbox_size.
(value changed: {min_overlap} => {new_min_overlap})")
+            min_overlap = new_min_overlap
+
+        _, ih, iw, _ = images.size()
+
+        mask_cache = None
+        mask_quality = 512
+        if mask_irregularity > 0:
+            if irregular_mask_mode == "Reuse fast":
+                mask_quality = 128
+                mask_cache = np.zeros((128, 128)).astype(np.float32)
+                core.random_mask(mask_cache, (0, 0, 128, 128), factor=mask_irregularity, size=mask_quality)
+            elif irregular_mask_mode == "Reuse quality":
+                mask_quality = 512
+                mask_cache = np.zeros((512, 512)).astype(np.float32)
+                core.random_mask(mask_cache, (0, 0, 512, 512), factor=mask_irregularity, size=mask_quality)
+            elif irregular_mask_mode == "All random fast":
+                mask_quality = 512
+
+        # compensate overlap/bbox_size for irregular mask
+        if mask_irregularity > 0:
+            compensate = max(6, int(mask_quality * mask_irregularity / 4))
+            min_overlap += compensate
+            bbox_size += compensate*2
+
+        # create exclusion mask
+        if filter_out_segs_opt is not None:
+            exclusion_mask = core.segs_to_combined_mask(filter_out_segs_opt)
+            exclusion_mask = utils.make_3d_mask(exclusion_mask)
+            exclusion_mask = utils.resize_mask(exclusion_mask, (ih, iw))
+            exclusion_mask = dilate_mask(exclusion_mask.cpu().numpy(), filter_segs_dilation)
+        else:
+            exclusion_mask = None
+
+        if filter_in_segs_opt is not None:
+            and_mask = core.segs_to_combined_mask(filter_in_segs_opt)
+            and_mask = utils.make_3d_mask(and_mask)
+            and_mask = utils.resize_mask(and_mask, (ih, iw))
+            and_mask = dilate_mask(and_mask.cpu().numpy(), filter_segs_dilation)
+
+            a, b = core.mask_to_segs(and_mask, True, 1.0, False, 0)
+            if len(b) == 0:
+                return ((a, b),)
+
+            start_x, start_y, c, d = b[0].crop_region
+            w = c - start_x
+            h = d - start_y
+        else:
+            start_x = 0
+            start_y = 0
+            h, w = ih, iw
+            and_mask = None
+
+        # calculate tile factors
+        if bbox_size > h or bbox_size > w:
+            new_bbox_size = min(bbox_size, min(w, h))
+            print(f"[MakeTileSEGS] bbox_size is greater than the image resolution. (value changed: {bbox_size} => {new_bbox_size})")
+            bbox_size = new_bbox_size
+
+        n_horizontal = math.ceil(w / (bbox_size - min_overlap))
+        n_vertical = math.ceil(h / (bbox_size - min_overlap))
+
+        w_overlap_sum = (bbox_size * n_horizontal) - w
+        if w_overlap_sum < 0:
+            n_horizontal += 1
+            w_overlap_sum = (bbox_size * n_horizontal) - w
+
+        w_overlap_size = 0 if n_horizontal == 1 else int(w_overlap_sum/(n_horizontal-1))
+
+        h_overlap_sum = (bbox_size * n_vertical) - h
+        if h_overlap_sum < 0:
+            n_vertical += 1
+            h_overlap_sum = (bbox_size * n_vertical) - h
+
+        h_overlap_size = 0 if n_vertical == 1 else int(h_overlap_sum/(n_vertical-1))
+
+        new_segs = []
+
+        if w_overlap_size == bbox_size:
+            n_horizontal = 1
+
+        if h_overlap_size == bbox_size:
+            n_vertical = 1
+
+        y = start_y
+        for j in range(0, n_vertical):
+            x = start_x
+            for i in range(0, n_horizontal):
+                x1 = x
+                y1 = y
+
+                if x+bbox_size < iw-1:
+                    x2 = x+bbox_size
+                else:
+                    x2 = iw
+                    x1 = iw-bbox_size
+
+                if y+bbox_size < ih-1:
+                    y2 = y+bbox_size
+                else:
+                    y2 = ih
+                    y1 = ih-bbox_size
+
+                bbox = x1, y1, x2, y2
+                crop_region = make_crop_region(iw, ih, bbox, crop_factor)
+                cx1, cy1, cx2, cy2 = crop_region
+
+                mask = np.zeros((cy2 - cy1, cx2 - cx1)).astype(np.float32)
+
+                rel_left = x1 - cx1
+                rel_top = y1 - cy1
+                rel_right = x2 - cx1
+                rel_bot = y2 - cy1
+
+                if mask_irregularity > 0:
+                    if mask_cache is not None:
+                        core.adaptive_mask_paste(mask, mask_cache, (rel_left, rel_top, rel_right, rel_bot))
+                    else:
+                        core.random_mask(mask, (rel_left, rel_top, rel_right, rel_bot), factor=mask_irregularity, size=mask_quality)
+
+                    # corner filling
+                    if
rel_left == 0: + pad = int((x2 - x1) / 8) + mask[rel_top:rel_bot, :pad] = 1.0 + + if rel_top == 0: + pad = int((y2 - y1) / 8) + mask[:pad, rel_left:rel_right] = 1.0 + + if rel_right == mask.shape[1]: + pad = int((x2 - x1) / 8) + mask[rel_top:rel_bot, -pad:] = 1.0 + + if rel_bot == mask.shape[0]: + pad = int((y2 - y1) / 8) + mask[-pad:, rel_left:rel_right] = 1.0 + else: + mask[rel_top:rel_bot, rel_left:rel_right] = 1.0 + + mask = torch.tensor(mask) + + if exclusion_mask is not None: + exclusion_mask_cropped = exclusion_mask[cy1:cy2, cx1:cx2] + mask[exclusion_mask_cropped != 0] = 0.0 + + if and_mask is not None: + and_mask_cropped = and_mask[cy1:cy2, cx1:cx2] + mask[and_mask_cropped == 0] = 0.0 + + is_mask_zero = torch.all(mask == 0.0).item() + + if not is_mask_zero: + item = SEG(None, mask.numpy(), 1.0, crop_region, bbox, "", None) + new_segs.append(item) + + x += bbox_size - w_overlap_size + y += bbox_size - h_overlap_size + + res = (ih, iw), new_segs # segs + return (res,) + + +class SEGSUpscaler: + @classmethod + def INPUT_TYPES(s): + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": { + "image": ("IMAGE",), + "segs": ("SEGS",), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL",), + "upscaler_hook_opt": ("UPSCALER_HOOK",), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + @staticmethod + def doit(image, segs, model, clip, vae, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=None, upscaler_hook_opt=None, scheduler_func_opt=None): + + new_image = segs_upscaler.upscaler(image, upscale_model_opt, rescale_factor, resampling_method, supersample, rounding_modulus) + + segs = core.segs_scale_match(segs, new_image.shape) + + ordered_segs = segs[1] + + for i, seg in enumerate(ordered_segs): + cropped_image = crop_ndarray4(new_image.numpy(), seg.crop_region) + cropped_image = to_tensor(cropped_image) + mask = to_tensor(seg.cropped_mask) + mask = tensor_gaussian_blur_mask(mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"SEGSUpscaler: segment skip [empty mask]") + continue + + cropped_mask = seg.cropped_mask + + seg_seed = seed + i + + enhanced_image = segs_upscaler.img2img_segs(cropped_image, model, clip, vae, seg_seed, steps, cfg, sampler_name, scheduler, 
+ positive, negative, denoise, + noise_mask=cropped_mask, control_net_wrapper=seg.control_net_wrapper, + inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + if not (enhanced_image is None): + new_image = new_image.cpu() + enhanced_image = enhanced_image.cpu() + left = seg.crop_region[0] + top = seg.crop_region[1] + tensor_paste(new_image, enhanced_image, (left, top), mask) + + if upscaler_hook_opt is not None: + new_image = upscaler_hook_opt.post_paste(new_image) + + enhanced_img = tensor_convert_rgb(new_image) + + return (enhanced_img,) + + +class SEGSUpscalerPipe: + @classmethod + def INPUT_TYPES(s): + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": { + "image": ("IMAGE",), + "segs": ("SEGS",), + "basic_pipe": ("BASIC_PIPE",), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL",), + "upscaler_hook_opt": ("UPSCALER_HOOK",), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + @staticmethod + def doit(image, segs, basic_pipe, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=None, upscaler_hook_opt=None, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + + return SEGSUpscaler.doit(image, segs, model, clip, vae, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=upscale_model_opt, upscaler_hook_opt=upscaler_hook_opt, scheduler_func_opt=scheduler_func_opt) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py new file mode 100644 index 0000000000000000000000000000000000000000..beb85d3a3bb16fdf4990bed0ac97ed87333465d6 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py @@ -0,0 +1,132 @@ +from impact.utils import * +from impact import impact_sampling +from comfy import model_management +from comfy.cli_args import args +import nodes + +try: + from comfy_extras import nodes_differential_diffusion +except Exception: + print(f"[Impact Pack] ComfyUI is an outdated version. 
The DifferentialDiffusion feature will be disabled.") + + +# Implementation based on `https://github.com/lingondricka2/Upscaler-Detailer` + +# code from comfyroll ---> +# https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/main/nodes/functions_upscale.py + +def upscale_with_model(upscale_model, image): + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = image.movedim(-1, -3).to(device) + free_memory = model_management.get_free_memory(device) + + tile = 512 + overlap = 32 + + oom = True + while oom: + try: + steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap) + pbar = comfy.utils.ProgressBar(steps) + s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar) + oom = False + except model_management.OOM_EXCEPTION as e: + tile //= 2 + if tile < 128: + raise e + + s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0) + return s + + +def apply_resize_image(image: Image.Image, original_width, original_height, rounding_modulus, mode='scale', supersample='true', factor: int = 2, width: int = 1024, height: int = 1024, + resample='bicubic'): + # Calculate the new width and height based on the given mode and parameters + if mode == 'rescale': + new_width, new_height = int(original_width * factor), int(original_height * factor) + else: + m = rounding_modulus + original_ratio = original_height / original_width + height = int(width * original_ratio) + + new_width = width if width % m == 0 else width + (m - width % m) + new_height = height if height % m == 0 else height + (m - height % m) + + # Define a dictionary of resampling filters + resample_filters = {'nearest': 0, 'bilinear': 2, 'bicubic': 3, 'lanczos': 1} + + # Apply supersample + if supersample == 'true': + image = image.resize((new_width * 8, new_height * 8), resample=Image.Resampling(resample_filters[resample])) + + # Resize the image using the given resampling filter + resized_image = image.resize((new_width, new_height), resample=Image.Resampling(resample_filters[resample])) + + return resized_image + + +def upscaler(image, upscale_model, rescale_factor, resampling_method, supersample, rounding_modulus): + if upscale_model is not None: + up_image = upscale_with_model(upscale_model, image) + else: + up_image = image + + pil_img = tensor2pil(image) + original_width, original_height = pil_img.size + scaled_image = pil2tensor(apply_resize_image(tensor2pil(up_image), original_width, original_height, rounding_modulus, 'rescale', + supersample, rescale_factor, 1024, resampling_method)) + return scaled_image + +# <--- + + +def img2img_segs(image, model, clip, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, noise_mask, control_net_wrapper=None, + inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + original_image_size = image.shape[1:3] + + # Match to original image size + if original_image_size[0] % 8 > 0 or original_image_size[1] % 8 > 0: + scale = 8/min(original_image_size[0], original_image_size[1]) + 1 + w = int(original_image_size[1] * scale) + h = int(original_image_size[0] * scale) + image = tensor_resize(image, w, h) + + if noise_mask is not None: + noise_mask = tensor_gaussian_blur_mask(noise_mask, noise_mask_feather) + noise_mask = noise_mask.squeeze(3) + + if noise_mask_feather > 0: + model = nodes_differential_diffusion.DifferentialDiffusion().apply(model)[0] + + if 
control_net_wrapper is not None: + positive, negative, _ = control_net_wrapper.apply(positive, negative, image, noise_mask) + + # prepare mask + if noise_mask is not None and inpaint_model: + positive, negative, latent_image = nodes.InpaintModelConditioning().encode(positive, negative, image, vae, noise_mask) + else: + latent_image = to_latent_image(image, vae) + if noise_mask is not None: + latent_image['noise_mask'] = noise_mask + + refined_latent = latent_image + + # ksampler + refined_latent = impact_sampling.ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, refined_latent, denoise, scheduler_func=scheduler_func_opt) + + # non-latent downscale - latent downscale cause bad quality + refined_image = vae.decode(refined_latent['samples']) + + # prevent mixing of device + refined_image = refined_image.cpu() + + # Match to original image size + if refined_image.shape[1:3] != original_image_size: + refined_image = tensor_resize(refined_image, original_image_size[1], original_image_size[0]) + + # don't convert to latent - latent break image + # preserving pil is much better + return refined_image diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..24b64fca35daa2f885c99a3511e6ec04ad80d947 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py @@ -0,0 +1,829 @@ +import math +import impact.core as core +from comfy_extras.nodes_custom_sampler import Noise_RandomNoise +from impact.utils import * +from nodes import MAX_RESOLUTION +import nodes +from impact.impact_sampling import KSamplerWrapper, KSamplerAdvancedWrapper, separated_sample, impact_sample + + +class TiledKSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + "basic_pipe": ("BASIC_PIPE", ) + }} + + TOOLTIPS = { + "input": { + "seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "cfg": "classifier free guidance value", + "sampler_name": "sampler", + "scheduler": "noise schedule", + "denoise": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned.", + "tile_width": "Sets the width of the tile to be used in TiledKSampler.", + "tile_height": "Sets the height of the tile to be used in TiledKSampler.", + "tiling_strategy": "Sets the tiling strategy for TiledKSampler.", + "basic_pipe": "basic_pipe input for sampling", + }, + "output": ("sampler wrapper. 
(Can be used when generating a regional_prompt.)", ) + } + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, cfg, sampler_name, scheduler, denoise, + tile_width, tile_height, tiling_strategy, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.TiledKSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy) + return (sampler, ) + + +class KSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE", ) + }, + "optional": { + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + TOOLTIPS = { + "input": { + "seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "cfg": "classifier free guidance value", + "sampler_name": "sampler", + "scheduler": "noise schedule", + "denoise": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned.", + "basic_pipe": "basic_pipe input for sampling", + "scheduler_func_opt": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored.", + }, + "output": ("sampler wrapper. (Can be used when generating a regional_prompt.)", ) + } + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, cfg, sampler_name, scheduler, denoise, basic_pipe, scheduler_func_opt=None): + model, _, _, positive, negative = basic_pipe + sampler = KSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, scheduler_func=scheduler_func_opt) + return (sampler, ) + + +class KSamplerAdvancedProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "sigma_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE", ) + }, + "optional": { + "sampler_opt": ("SAMPLER", ), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + TOOLTIPS = { + "input": { + "cfg": "classifier free guidance value", + "sampler_name": "sampler", + "scheduler": "noise schedule", + "sigma_factor": "Multiplier of noise schedule", + "basic_pipe": "basic_pipe input for sampling", + "sampler_opt": "[OPTIONAL] Uses the passed sampler instead of internal impact_sampler.", + "scheduler_func_opt": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored.", + }, + "output": ("sampler wrapper. 
(Can be used when generating a regional_prompt.)", ) + } + + RETURN_TYPES = ("KSAMPLER_ADVANCED",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(cfg, sampler_name, scheduler, basic_pipe, sigma_factor=1.0, sampler_opt=None, scheduler_func_opt=None): + model, _, _, positive, negative = basic_pipe + sampler = KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative, sampler_opt=sampler_opt, sigma_factor=sigma_factor, scheduler_func=scheduler_func_opt) + return (sampler, ) + + +class TwoSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent_image": ("LATENT", ), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ) + }, + } + + TOOLTIPS = { + "input": { + "latent_image": "input latent image", + "base_sampler": "Sampler to apply to the region outside the mask.", + "mask_sampler": "Sampler to apply to the masked region.", + "mask": "region mask", + }, + "output": ("result latent", ) + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(latent_image, base_sampler, mask_sampler, mask): + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample(latent_image) + + new_latent_image['noise_mask'] = mask + new_latent_image = mask_sampler.sample(new_latent_image) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class TwoAdvancedSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "samples": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "mask_sampler": ("KSAMPLER_ADVANCED", ), + "mask": ("MASK", ), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}) + }, + } + + TOOLTIPS = { + "input": { + "seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "denoise": "The amount of noise to remove. 
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned.", + "samples": "input latent image", + "base_sampler": "Sampler to apply to the region outside the mask.", + "mask_sampler": "Sampler to apply to the masked region.", + "mask": "region mask", + "overlap_factor": "To smooth the seams of the region boundaries, expand the mask by the overlap_factor amount to overlap with other regions.", + }, + "output": ("result latent", ) + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, denoise, samples, base_sampler, mask_sampler, mask, overlap_factor): + regional_prompts = RegionalPrompt().doit(mask=mask, advanced_sampler=mask_sampler)[0] + + return RegionalSampler().doit(seed=seed, seed_2nd=0, seed_2nd_mode="ignore", steps=steps, base_only_steps=1, + denoise=denoise, samples=samples, base_sampler=base_sampler, + regional_prompts=regional_prompts, overlap_factor=overlap_factor, + restore_latent=True, additional_mode="ratio between", + additional_sampler="AUTO", additional_sigma_ratio=0.3) + + +class RegionalPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + "advanced_sampler": ("KSAMPLER_ADVANCED", ), + }, + "optional": { + "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "variation_method": (["linear", "slerp"],), + } + } + + TOOLTIPS = { + "input": { + "mask": "region mask", + "advanced_sampler": "sampler for specified region", + }, + "output": ("regional prompts. (Can be used in the RegionalSampler.)", ) + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(mask, advanced_sampler, variation_seed=0, variation_strength=0.0, variation_method="linear"): + regional_prompt = core.REGIONAL_PROMPT(mask, advanced_sampler, variation_seed=variation_seed, variation_strength=variation_strength, variation_method=variation_method) + return ([regional_prompt], ) + + +class CombineRegionalPrompts: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "regional_prompts1": ("REGIONAL_PROMPTS", ), + }, + } + + TOOLTIPS = { + "input": { + "regional_prompts1": "input regional_prompts. (Connecting to the input slot increases the number of additional slots.)", + }, + "output": ("Combined REGIONAL_PROMPTS", ) + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(**kwargs): + res = [] + for k, v in kwargs.items(): + res += v + + return (res, ) + + +class CombineConditionings: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "conditioning1": ("CONDITIONING", ), + }, + } + + TOOLTIPS = { + "input": { + "conditioning1": "input conditionings. (Connecting to the input slot increases the number of additional slots.)", + }, + "output": ("Combined conditioning", ) + } + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(**kwargs): + res = [] + for k, v in kwargs.items(): + res += v + + return (res, ) + + +class ConcatConditionings: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "conditioning1": ("CONDITIONING", ), + }, + } + + TOOLTIPS = { + "input": { + "conditioning1": "input conditionings. 
(Connecting to the input slot increases the number of additional slots.)",
+        },
+        "output": ("Concatenated conditioning", )
+    }
+
+    RETURN_TYPES = ("CONDITIONING", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(**kwargs):
+        conditioning_to = list(kwargs.values())[0]
+
+        for k, conditioning_from in list(kwargs.items())[1:]:
+            out = []
+            if len(conditioning_from) > 1:
+                print(f"Warning: ConcatConditionings {k} contains more than 1 cond, only the first one will actually be applied to conditioning1.")
+
+            cond_from = conditioning_from[0][0]
+
+            for i in range(len(conditioning_to)):
+                t1 = conditioning_to[i][0]
+                tw = torch.cat((t1, cond_from), 1)
+                n = [tw, conditioning_to[i][1].copy()]
+                out.append(n)
+
+            conditioning_to = out
+
+        # return the accumulated result; 'out' alone would be undefined when only one conditioning is connected
+        return (conditioning_to, )
+
+
+class RegionalSampler:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "seed_2nd": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "seed_2nd_mode": (["ignore", "fixed", "seed+seed_2nd", "seed-seed_2nd", "increment", "decrement", "randomize"], ),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "base_only_steps": ("INT", {"default": 2, "min": 0, "max": 10000}),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "samples": ("LATENT", ),
+                    "base_sampler": ("KSAMPLER_ADVANCED", ),
+                    "regional_prompts": ("REGIONAL_PROMPTS", ),
+                    "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}),
+                    "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
+                    "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between"}),
+                    "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"],),
+                    "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
+                },
+                "hidden": {"unique_id": "UNIQUE_ID"},
+                }
+
+    TOOLTIPS = {
+        "input": {
+            "seed": "Random seed to use for generating CPU noise for sampling.",
+            "seed_2nd": "Additional noise seed. The behavior is determined by seed_2nd_mode.",
+            "seed_2nd_mode": "Application method of seed_2nd. 1) ignore: Do not use seed_2nd. In the base-only sampling stage, the seed is applied as a noise seed, and in the regional sampling stage, denoising is performed as-is without additional noise. 2) Others: In the base-only sampling stage, the seed is applied as a noise seed, and once that stage is finished with no leftover noise, new noise is added with seed_2nd and the regional sampling stage is performed. a) fixed: Use seed_2nd as-is as an additional noise seed. b) seed+seed_2nd: Apply the value of seed+seed_2nd as an additional noise seed. c) seed-seed_2nd: Apply the value of seed-seed_2nd as an additional noise seed. d) increment: Not implemented yet. Same as fixed. e) decrement: Not implemented yet. Same as fixed. f) randomize: Not implemented yet. Same as fixed.",
+            "steps": "total sampling steps",
+            "base_only_steps": "The number of initial steps sampled with only the base_sampler before regional sampling begins.",
+            "denoise": "The amount of noise to remove.
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned.",
+            "samples": "input latent image",
+            "base_sampler": "The sampler applied outside the area set by the regional_prompt.",
+            "regional_prompts": "The prompt applied to each region",
+            "overlap_factor": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions.",
+            "restore_latent": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true.",
+            "additional_mode": "When ..._sde, uni_pc, or other special samplers are used, the region is not properly denoised, which destroys the overall harmony of the result. To compensate for this, a recovery operation is performed using another sampler. This takes longer, because a second sampling is performed at each step in each region using the recovery sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount scheduled for the step with the sampler set for the region, the recovery sampler is additionally applied by additional_sigma_ratio. With this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The denoise amount scheduled for the step is split between the region's sampler and the recovery sampler by additional_sigma_ratio, and each performs its share. With this option, the total denoise amount does not change.",
+            "additional_sampler": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, or dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, or dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected.
2) Others: Manually set the recovery sampler.", + "additional_sigma_ratio": "Multiplier of noise schedule to be applied according to additional_mode.", + }, + "output": ("result latent", ) + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def separated_sample(*args, **kwargs): + return separated_sample(*args, **kwargs) + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + @staticmethod + def doit(seed, seed_2nd, seed_2nd_mode, steps, base_only_steps, denoise, samples, base_sampler, regional_prompts, overlap_factor, restore_latent, + additional_mode, additional_sampler, additional_sigma_ratio, unique_id=None): + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + region_len = len(regional_prompts) + total = steps*region_len + + leftover_noise = False + if base_only_steps > 0: + if seed_2nd_mode == 'ignore': + leftover_noise = True + + noise = Noise_RandomNoise(seed).generate_noise(samples) + + for rp in regional_prompts: + noise = rp.touch_noise(noise) + + samples = base_sampler.sample_advanced(True, seed, adv_steps, samples, start_at_step, start_at_step + base_only_steps, leftover_noise, recovery_mode="DISABLE", noise=noise) + + if seed_2nd_mode == "seed+seed_2nd": + seed += seed_2nd + if seed > 1125899906842624: + seed = seed - 1125899906842624 + elif seed_2nd_mode == "seed-seed_2nd": + seed -= seed_2nd + if seed < 0: + seed += 1125899906842624 + elif seed_2nd_mode != 'ignore': + seed = seed_2nd + + new_latent_image = samples.copy() + base_latent_image = None + + if not leftover_noise: + add_noise = True + noise = Noise_RandomNoise(seed).generate_noise(samples) + + for rp in regional_prompts: + noise = rp.touch_noise(noise) + else: + add_noise = False + noise = None + + for i in range(start_at_step+base_only_steps, adv_steps): + core.update_node_status(unique_id, f"{i}/{steps} steps | ", ((i-start_at_step)*region_len)/total) + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, + start_at_step=i, end_at_step=i + 1, return_with_leftover_noise=True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise) + + if restore_latent: + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + 
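# each regional pass sets the region's mask as 'noise_mask', so only that region is denoised at this step +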
core.update_node_status(unique_id, f"{i}/{steps} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced(False, seed, adv_steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + add_noise = False + + # finalize + core.update_node_status(unique_id, f"finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(False, seed, adv_steps, new_latent_image, adv_steps, adv_steps+1, False, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + core.update_node_status(unique_id, f"{steps}/{steps} steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class RegionalSamplerAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}), + "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "latent_image": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "regional_prompts": ("REGIONAL_PROMPTS", ), + "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between"}), + "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"],), + "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + TOOLTIPS = { + "input": { + "add_noise": "Whether to add noise", + "noise_seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "start_at_step": "The starting step of the sampling to be applied at this node within the range of 'steps'.", + "end_at_step": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps).", + "overlap_factor": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions.", + "restore_latent": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. 
This option is provided for backward compatibility, and it is recommended to always set it to true.",
+            "return_with_leftover_noise": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it.",
+            "latent_image": "input latent image",
+            "base_sampler": "The sampler applied outside the area set by the regional_prompt.",
+            "regional_prompts": "The prompt applied to each region",
+            "additional_mode": "When ..._sde, uni_pc, or other special samplers are used, the region is not properly denoised, which destroys the overall harmony of the result. To compensate for this, a recovery operation is performed using another sampler. This takes longer, because a second sampling is performed at each step in each region using the recovery sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount scheduled for the step with the sampler set for the region, the recovery sampler is additionally applied by additional_sigma_ratio. With this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The denoise amount scheduled for the step is split between the region's sampler and the recovery sampler by additional_sigma_ratio, and each performs its share. With this option, the total denoise amount does not change.",
+            "additional_sampler": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, or dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, or dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected.
2) Others: Manually set the recovery sampler.", + "additional_sigma_ratio": "Multiplier of noise schedule to be applied according to additional_mode.", + }, + "output": ("result latent", ) + } + + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(add_noise, noise_seed, steps, start_at_step, end_at_step, overlap_factor, restore_latent, return_with_leftover_noise, latent_image, base_sampler, regional_prompts, + additional_mode, additional_sampler, additional_sigma_ratio, unique_id): + + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + region_len = len(regional_prompts) + end_at_step = min(steps, end_at_step) + total = (end_at_step - start_at_step) * region_len + + new_latent_image = latent_image.copy() + base_latent_image = None + region_masks = {} + + for i in range(start_at_step, end_at_step-1): + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | ", ((i-start_at_step)*region_len)/total) + + cur_add_noise = True if i == start_at_step and add_noise else False + + if cur_add_noise: + noise = Noise_RandomNoise(noise_seed).generate_noise(new_latent_image) + for rp in regional_prompts: + noise = rp.touch_noise(noise) + else: + noise = None + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(cur_add_noise, noise_seed, steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + if j not in region_masks: + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + region_masks[j] = region_mask + else: + region_mask = region_masks[j] + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced(False, noise_seed, steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + # finalize + core.update_node_status(unique_id, f"finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(False, noise_seed, steps, new_latent_image, end_at_step-1, end_at_step, return_with_leftover_noise, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + core.update_node_status(unique_id, f"{end_at_step}/{end_at_step} 
steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class KSamplerBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": + { + "scheduler_func_opt": ("SCHEDULER_FUNC", ), + } + } + + TOOLTIPS = { + "input": { + "basic_pipe": "basic_pipe input for sampling", + "seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "cfg": "classifier free guidance value", + "sampler_name": "sampler", + "scheduler": "noise schedule", + "latent_image": "input latent image", + "denoise": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned.", + "scheduler_func_opt": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored.", + }, + "output": ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe") + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "ImpactPack/sampling" + + @staticmethod + def sample(basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise=1.0, scheduler_func_opt=None): + model, clip, vae, positive, negative = basic_pipe + latent = impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, scheduler_func=scheduler_func_opt) + return basic_pipe, latent, vae + + +class KSamplerAdvancedBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + "optional": + { + "scheduler_func_opt": ("SCHEDULER_FUNC", ), + } + } + + TOOLTIPS = { + "input": { + "basic_pipe": "basic_pipe input for sampling", + "add_noise": "Whether to add noise", + "noise_seed": "Random seed to use for generating CPU noise for sampling.", + "steps": "total sampling steps", + "cfg": "classifier free guidance value", + "sampler_name": "sampler", + "scheduler": "noise schedule", + "latent_image": "input latent image", + "start_at_step": "The starting step of the sampling to be applied at this node within the range of 'steps'.", + "end_at_step": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling 
will continue only up to steps).", + "return_with_leftover_noise": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it.", + "scheduler_func_opt": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored.", + }, + "output": ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe") + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "ImpactPack/sampling" + + @staticmethod + def sample(basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0, scheduler_func_opt=None): + model, clip, vae, positive, negative = basic_pipe + + latent = separated_sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, scheduler_func=scheduler_func_opt) + return basic_pipe, latent, vae + + +class GITSSchedulerFuncProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coeff": ("FLOAT", {"default": 1.20, "min": 0.80, "max": 1.50, "step": 0.05}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + TOOLTIPS = { + "input": { + "coeff": "coeff factor of GITS Scheduler", + "denoise": "denoise amount for noise schedule", + }, + "output": ("Returns a function that generates a noise schedule using GITSScheduler. This can be used in place of a predetermined noise schedule to dynamically generate a noise schedule based on the steps.",) + } + + RETURN_TYPES = ("SCHEDULER_FUNC",) + CATEGORY = "ImpactPack/sampling" + + FUNCTION = "doit" + + @staticmethod + def doit(coeff, denoise): + def f(model, sampler, steps): + if 'GITSScheduler' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception("[Impact Pack] ComfyUI is an outdated version. 
Cannot use GITSScheduler.") + + scheduler = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']() + return scheduler.get_sigmas(coeff, steps, denoise)[0] + + return (f, ) + + +class NegativeConditioningPlaceholder: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + + TOOLTIPS = { + "output": ("This is a Placeholder for the FLUX model that does not use Negative Conditioning.",) + } + + RETURN_TYPES = ("CONDITIONING",) + CATEGORY = "ImpactPack/sampling" + + FUNCTION = "doit" + + @staticmethod + def doit(): + return ("NegativePlaceholder", ) diff --git a/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ee3087d2174b72d8ba1f672d1000e989ad7295 --- /dev/null +++ b/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py @@ -0,0 +1,586 @@ +from impact.utils import any_typ, ByPassTypeTuple, make_3d_mask +import comfy_extras.nodes_mask +from nodes import MAX_RESOLUTION +import torch +import comfy +import sys +import nodes +import re +from server import PromptServer + + +class GeneralSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1}), + "sel_mode": ("BOOLEAN", {"default": True, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False}), + }, + "optional": { + "input1": (any_typ,), + }, + "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"} + } + + RETURN_TYPES = (any_typ, "STRING", "INT") + RETURN_NAMES = ("selected_value", "selected_label", "selected_index") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + selected_index = int(kwargs['select']) + input_name = f"input{selected_index}" + + selected_label = input_name + node_id = kwargs['unique_id'] + + if 'extra_pnginfo' in kwargs and kwargs['extra_pnginfo'] is not None: + nodelist = kwargs['extra_pnginfo']['workflow']['nodes'] + for node in nodelist: + if str(node['id']) == node_id: + inputs = node['inputs'] + + for slot in inputs: + if slot['name'] == input_name and 'label' in slot: + selected_label = slot['label'] + + break + else: + print(f"[Impact-Pack] The switch node does not guarantee proper functioning in API mode.") + + if input_name in kwargs: + return (kwargs[input_name], selected_label, selected_index) + else: + print(f"ImpactSwitch: invalid select index (ignored)") + return (None, "", selected_index) + + +class LatentSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "latent1": ("LATENT",), + }, + } + + RETURN_TYPES = ("LATENT", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"latent{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + print(f"LatentSwitch: invalid select index ('latent1' is selected)") + return (kwargs['latent1'],) + + +class ImageMaskSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "images1": ("IMAGE",), + }, + + "optional": { + "mask1_opt": ("MASK",), + "images2_opt": ("IMAGE",), + "mask2_opt": ("MASK",), + "images3_opt": ("IMAGE",), + "mask3_opt": ("MASK",), + "images4_opt": ("IMAGE",), + "mask4_opt": ("MASK",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK",) 
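+ # NOTE (added comment): 'select' (1-4) picks which (image, mask) pair is
+ # forwarded; pairs 2-4 are optional inputs, so the returned values may be None.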
+ + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None, + images4_opt=None, mask4_opt=None): + if select == 1: + return images1, mask1_opt, + elif select == 2: + return images2_opt, mask2_opt, + elif select == 3: + return images3_opt, mask3_opt, + else: + return images4_opt, mask4_opt, + + +class GeneralInversedSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1}), + "input": (any_typ,), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ByPassTypeTuple((any_typ, )) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, input, unique_id): + res = [] + + for i in range(0, select): + if select == i+1: + res.append(input) + else: + res.append(None) + + return res + + +class RemoveNoiseMask: + @classmethod + def INPUT_TYPES(s): + return {"required": {"samples": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, samples): + res = {key: value for key, value in samples.items() if key != 'noise_mask'} + return (res, ) + + +class ImagePasteMasked: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "resize_source": ("BOOLEAN", {"default": False}), + }, + "optional": { + "mask": ("MASK",), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "composite" + + CATEGORY = "image" + + def composite(self, destination, source, x, y, resize_source, mask = None): + destination = destination.clone().movedim(-1, 1) + output = comfy_extras.nodes_mask.composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1) + return (output,) + + +from impact.utils import any_typ + +class ImpactLogger: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "data": (any_typ,), + "text": ("STRING", {"multiline": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}, + } + + CATEGORY = "ImpactPack/Debug" + + OUTPUT_NODE = True + + RETURN_TYPES = () + FUNCTION = "doit" + + def doit(self, data, text, prompt, extra_pnginfo, unique_id): + shape = "" + if hasattr(data, "shape"): + shape = f"{data.shape} / " + + print(f"[IMPACT LOGGER]: {shape}{data}") + + print(f" PROMPT: {prompt}") + + # for x in prompt: + # if 'inputs' in x and 'populated_text' in x['inputs']: + # print(f"PROMPT: {x['10']['inputs']['populated_text']}") + # + # for x in extra_pnginfo['workflow']['nodes']: + # if x['type'] == 'ImpactWildcardProcessor': + # print(f" WV : {x['widgets_values'][1]}\n") + + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": unique_id, "widget_name": "text", "type": "TEXT", "value": f"{data}"}) + return {} + + +class ImpactDummyInput: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + + CATEGORY = "ImpactPack/Debug" + + RETURN_TYPES = (any_typ,) + FUNCTION = "doit" + + def doit(self): + return ("DUMMY",) + + +class MasksToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "masks": ("MASK", ), + } + } + + RETURN_TYPES = ("MASK", ) + OUTPUT_IS_LIST = (True, ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, masks): + if masks is 
None: + empty_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return ([empty_mask], ) + + res = [] + + for mask in masks: + res.append(mask) + + print(f"mask len: {len(res)}") + + res = [make_3d_mask(x) for x in res] + + return (res, ) + + +class MaskListToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("MASK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask): + if len(mask) == 1: + mask = make_3d_mask(mask[0]) + return (mask,) + elif len(mask) > 1: + mask1 = make_3d_mask(mask[0]) + + for mask2 in mask[1:]: + mask2 = make_3d_mask(mask2) + if mask1.shape[1:] != mask2.shape[1:]: + mask2 = comfy.utils.common_upscale(mask2.movedim(-1, 1), mask1.shape[2], mask1.shape[1], "lanczos", "center").movedim(1, -1) + mask1 = torch.cat((mask1, mask2), dim=0) + + return (mask1,) + else: + empty_mask = torch.zeros((1, 64, 64), dtype=torch.float32, device="cpu").unsqueeze(0) + return (empty_mask,) + + +class ImageListToImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, images): + if len(images) <= 1: + return (images,) + else: + image1 = images[0] + for image2 in images[1:]: + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "lanczos", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + return (image1,) + + +class ImageBatchToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, image): + images = [image[i:i + 1, ...] 
for i in range(image.shape[0])] + return (images, ) + + +class MakeImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + images = [] + + for k, v in kwargs.items(): + images.append(v) + + return (images, ) + + +class MakeImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + image1 = kwargs['image1'] + del kwargs['image1'] + images = [value for value in kwargs.values()] + + if len(images) == 0: + return (image1,) + else: + for image2 in images: + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "lanczos", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + return (image1,) + + +class ReencodeLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_vae": ("VAE", ), + "output_vae": ("VAE", ), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_vae, output_vae, tile_size=512): + if tile_mode in ["Both", "Decode(input) only"]: + pixels = nodes.VAEDecodeTiled().decode(input_vae, samples, tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(input_vae, samples)[0] + + if tile_mode in ["Both", "Encode(output) only"]: + return nodes.VAEEncodeTiled().encode(output_vae, pixels, tile_size) + else: + return nodes.VAEEncode().encode(output_vae, pixels) + + +class ReencodeLatentPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_basic_pipe": ("BASIC_PIPE", ), + "output_basic_pipe": ("BASIC_PIPE", ), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_basic_pipe, output_basic_pipe): + _, _, input_vae, _, _ = input_basic_pipe + _, _, output_vae, _, _ = output_basic_pipe + return ReencodeLatent().doit(samples, tile_mode, input_vae, output_vae) + + +class StringSelector: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "strings": ("STRING", {"multiline": True}), + "multiline": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "select": ("INT", {"min": 0, "max": sys.maxsize, "step": 1, "default": 0}), + }} + + RETURN_TYPES = ("STRING",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, strings, multiline, select): + lines = strings.split('\n') + + if multiline: + result = [] + current_string = "" + + for line in lines: + if line.startswith("#"): + if current_string: + result.append(current_string.strip()) + current_string = "" + current_string += line + "\n" + + if current_string: + result.append(current_string.strip()) + + if len(result) == 0: + selected = strings + else: + selected = result[select % len(result)] + + if selected.startswith('#'): + selected = selected[1:] + else: + if len(lines) == 0: + selected = strings + else: + selected = lines[select % len(lines)] + + return (selected, ) 
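+
+ # Usage sketch (illustrative only, not part of the node API): with 'multiline'
+ # enabled, blocks are delimited by lines starting with '#', and 'select'
+ # indexes the blocks modulo their count, e.g.:
+ #
+ #   s = "#first\nfoo\n#second\nbar"
+ #   StringSelector().doit(s, True, 1)   # -> ("second\nbar",)
+ #
+ # The leading '#' of the selected block is stripped before it is returned.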
+ + +class StringListToString: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "join_with": ("STRING", {"default": "\\n"}), + "string_list": ("STRING", {"forceInput": True}), + } + } + + INPUT_IS_LIST = True + RETURN_TYPES = ("STRING",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, join_with, string_list): + # convert \\n to newline character + if join_with[0] == "\\n": + join_with[0] = "\n" + + joined_text = join_with[0].join(string_list) + + return (joined_text,) + + +class WildcardPromptFromString: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "string": ("STRING", {"forceInput": True}), + "delimiter": ("STRING", {"multiline": False, "default": "\\n" }), + "prefix_all": ("STRING", {"multiline": False}), + "postfix_all": ("STRING", {"multiline": False}), + "restrict_to_tags": ("STRING", {"multiline": False}), + "exclude_tags": ("STRING", {"multiline": False}) + }, + } + + RETURN_TYPES = ("STRING", "STRING",) + RETURN_NAMES = ("wildcard", "segs_labels",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, string, delimiter, prefix_all, postfix_all, restrict_to_tags, exclude_tags): + # convert \\n to newline character + if delimiter == "\\n": + delimiter = "\n" + + # some sanity checks and normalization for later processing + if prefix_all is None: + prefix_all = "" + if postfix_all is None: + postfix_all = "" + if restrict_to_tags is None: + restrict_to_tags = "" + if exclude_tags is None: + exclude_tags = "" + + restrict_to_tags = restrict_to_tags.split(", ") + exclude_tags = exclude_tags.split(", ") + + # build the wildcard prompt per list entry + output = ["[LAB]"] + labels = [] + for x in string.split(delimiter): + label = str(len(labels) + 1) + labels.append(label) + x = x.split(", ") + # restrict to tags + if restrict_to_tags != [""]: + x = list(set(x) & set(restrict_to_tags)) + # remove tags + if exclude_tags != [""]: + x = list(set(x) - set(exclude_tags)) + # next row:
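+ # (added note: the set-based restrict/exclude filtering above does not
+ # preserve the original tag order within each entry)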