mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-03-12 00:30:12 +00:00
Merge branch 'master' into flipflop-stream
This commit is contained in:
@@ -1,26 +1,38 @@
|
||||
import node_helpers
|
||||
from typing_extensions import override
|
||||
from comfy_api.latest import ComfyExtension, io
|
||||
|
||||
|
||||
class ReferenceLatent(io.ComfyNode):
    """Set a guiding reference latent on a conditioning for an edit model.

    Appends the latent's samples to the conditioning's ``reference_latents``
    list, so several of these nodes can be chained to supply multiple
    reference images (each append adds to, rather than overwrites, the list).
    """

    @classmethod
    def define_schema(cls):
        """Return the node schema: required conditioning, optional latent."""
        return io.Schema(
            node_id="ReferenceLatent",
            category="advanced/conditioning/edit_models",
            description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Latent.Input("latent", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, conditioning, latent=None) -> io.NodeOutput:
        """Attach ``latent["samples"]`` as a reference latent, if provided.

        With no latent the conditioning is passed through unchanged.
        """
        if latent is not None:
            # append=True accumulates references across chained nodes.
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [latent["samples"]]}, append=True)
        return io.NodeOutput(conditioning)
|
||||
|
||||
|
||||
class EditModelExtension(ComfyExtension):
    """Extension registering the edit-model conditioning nodes."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the nodes this extension provides."""
        return [
            ReferenceLatent,
        ]


# NOTE(review): sibling extensions in this changeset declare their entrypoint
# as ``async def`` — confirm the loader also accepts this plain function form.
def comfy_entrypoint() -> EditModelExtension:
    """Entry point used by the extension loader."""
    return EditModelExtension()
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
class EpsilonScaling:
|
||||
from typing_extensions import override
|
||||
|
||||
from comfy_api.latest import ComfyExtension, io
|
||||
|
||||
|
||||
class EpsilonScaling(io.ComfyNode):
|
||||
"""
|
||||
Implements the Epsilon Scaling method from 'Elucidating the Exposure Bias in Diffusion Models'
|
||||
(https://arxiv.org/abs/2308.15321v6).
|
||||
@@ -8,26 +13,28 @@ class EpsilonScaling:
|
||||
recommended by the paper for its practicality and effectiveness.
|
||||
"""
|
||||
@classmethod
def define_schema(cls):
    """Return the node schema for ``EpsilonScaling`` (method of that class).

    Exposes the model to patch and the epsilon ``scaling_factor``
    (default 1.005, clamped to [0.5, 1.5] by the UI).
    """
    return io.Schema(
        node_id="Epsilon Scaling",
        category="model_patches/unet",
        inputs=[
            io.Model.Input("model"),
            io.Float.Input(
                "scaling_factor",
                default=1.005,
                min=0.5,
                max=1.5,
                step=0.001,
                display_mode=io.NumberDisplay.number,
            ),
        ],
        outputs=[
            io.Model.Output(),
        ],
    )
|
||||
|
||||
def patch(self, model, scaling_factor):
|
||||
@classmethod
|
||||
def execute(cls, model, scaling_factor) -> io.NodeOutput:
|
||||
# Prevent division by zero, though the UI's min value should prevent this.
|
||||
if scaling_factor == 0:
|
||||
scaling_factor = 1e-9
|
||||
@@ -53,8 +60,15 @@ class EpsilonScaling:
|
||||
|
||||
model_clone.set_model_sampler_post_cfg_function(epsilon_scaling_function)
|
||||
|
||||
return (model_clone,)
|
||||
return io.NodeOutput(model_clone)
|
||||
|
||||
class EpsilonScalingExtension(ComfyExtension):
    """Extension registering the Epsilon Scaling model patch node."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the nodes this extension provides."""
        return [
            EpsilonScaling,
        ]


async def comfy_entrypoint() -> EpsilonScalingExtension:
    """Entry point used by the extension loader."""
    return EpsilonScalingExtension()
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
#Taken from: https://github.com/dbolya/tomesd
|
||||
|
||||
import torch
|
||||
from typing import Tuple, Callable
|
||||
from typing import Tuple, Callable, Optional
|
||||
from typing_extensions import override
|
||||
from comfy_api.latest import ComfyExtension, io
|
||||
import math
|
||||
|
||||
def do_nothing(x: torch.Tensor, mode:str=None):
|
||||
@@ -144,33 +146,45 @@ def get_functions(x, ratio, original_shape):
|
||||
|
||||
|
||||
|
||||
class TomePatchModel(io.ComfyNode):
    """Patch a model's attn1 with Token Merging (ToMe).

    ``ratio`` controls the fraction of tokens merged before attention;
    the matching unmerge is applied on the attention output.
    """

    @classmethod
    def define_schema(cls):
        """Return the node schema: model to patch plus merge ratio."""
        return io.Schema(
            node_id="TomePatchModel",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, model, ratio) -> io.NodeOutput:
        """Clone the model and install the ToMe merge/unmerge attn1 patches."""
        # Unmerge callable produced by the latest merge call; shared between the
        # two patch closures via ``nonlocal`` instead of instance state.
        u: Optional[Callable] = None

        def tomesd_m(q, k, v, extra_options):
            nonlocal u
            #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q
            #however from my basic testing it seems that using q instead gives better results
            m, u = get_functions(q, ratio, extra_options["original_shape"])
            return m(q), k, v

        def tomesd_u(n, extra_options):
            nonlocal u
            return u(n)

        m = model.clone()
        m.set_model_attn1_patch(tomesd_m)
        m.set_model_attn1_output_patch(tomesd_u)
        return io.NodeOutput(m)
|
||||
|
||||
|
||||
class TomePatchModelExtension(ComfyExtension):
    """Extension registering the ToMe model patch node."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the nodes this extension provides."""
        return [
            TomePatchModel,
        ]


async def comfy_entrypoint() -> TomePatchModelExtension:
    """Entry point used by the extension loader."""
    return TomePatchModelExtension()
|
||||
|
||||
@@ -1,23 +1,39 @@
|
||||
from typing_extensions import override
|
||||
from comfy_api.latest import ComfyExtension, io
|
||||
from comfy_api.torch_helpers import set_torch_compile_wrapper
|
||||
|
||||
|
||||
class TorchCompileModel(io.ComfyNode):
    """Wrap a model with ``torch.compile`` using the chosen backend.

    Marked experimental; the original model object is left unpatched.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        """Return the node schema: model plus compile backend selection."""
        return io.Schema(
            node_id="TorchCompileModel",
            category="_for_testing",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input(
                    "backend",
                    options=["inductor", "cudagraphs"],
                ),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, backend) -> io.NodeOutput:
        """Clone the model and apply the torch.compile wrapper to the clone."""
        m = model.clone()
        set_torch_compile_wrapper(model=m, backend=backend)
        return io.NodeOutput(m)
|
||||
|
||||
class TorchCompileExtension(ComfyExtension):
    """Extension registering the TorchCompileModel node."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the nodes this extension provides."""
        return [
            TorchCompileModel,
        ]


async def comfy_entrypoint() -> TorchCompileExtension:
    """Entry point used by the extension loader."""
    return TorchCompileExtension()
|
||||
|
||||
Reference in New Issue
Block a user