Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-17 21:50:09 +00:00)

Compare commits: v0.3.64...v3-process (14 commits)

81e4dac107
90853fb9cd
f1dd6e50f8
fc0fbf141c
f3d5d328a3
139addd53c
cbee7d3390
6732014a0a
989f715d92
2ba8d7cce8
51fb505ffa
72c2071972
6e59934089
3e0eb8d33f
@@ -123,16 +123,30 @@ def move_weight_functions(m, device):
     return memory

 class LowVramPatch:
-    def __init__(self, key, patches):
+    def __init__(self, key, patches, convert_func=None, set_func=None):
         self.key = key
         self.patches = patches
+        self.convert_func = convert_func
+        self.set_func = set_func

     def __call__(self, weight):
         intermediate_dtype = weight.dtype
+        if self.convert_func is not None:
+            weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True)
+
         if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops
             intermediate_dtype = torch.float32
-            return comfy.float.stochastic_rounding(comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype), weight.dtype, seed=string_to_seed(self.key))
+            out = comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype)
+            if self.set_func is None:
+                return comfy.float.stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key))
+            else:
+                return self.set_func(out, seed=string_to_seed(self.key), return_weight=True)

-        return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype)
+        out = comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype)
+        if self.set_func is not None:
+            return self.set_func(out, seed=string_to_seed(self.key), return_weight=True).to(dtype=intermediate_dtype)
+        else:
+            return out

 def get_key_weight(model, key):
     set_func = None
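The new convert_func/set_func hooks let LowVramPatch round-trip weights stored in a quantized format: convert_func dequantizes to a math-friendly dtype before the LoRA patch is applied, and set_func re-quantizes the patched result. A toy illustration of that flow, with a hypothetical scale standing in for the real scaled-fp8 handling:

# Toy dequantize -> patch -> requantize flow enabled by convert_func/set_func.
# The SCALE constant and helpers are hypothetical stand-ins.
import torch

SCALE = 2.0  # pretend stored weights are quantized by this scale

def convert_func(weight, inplace=True):
    return weight * SCALE   # dequantize, mirroring scaled-fp8 convert (weight * scale_weight)

def apply_patch(weight):
    return weight + 0.01    # stand-in for comfy.lora.calculate_weight

def set_func(weight, seed=None, return_weight=False):
    quantized = weight / SCALE  # requantize, mirroring set_weight (weight / scale_weight)
    return quantized if return_weight else None

stored = torch.randn(4, 4) / SCALE
patched = set_func(apply_patch(convert_func(stored.to(torch.float32, copy=True))),
                   return_weight=True)
print(patched.shape)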
@@ -657,13 +671,15 @@ class ModelPatcher:
                     if force_patch_weights:
                         self.patch_weight_to_device(weight_key)
                     else:
-                        m.weight_function = [LowVramPatch(weight_key, self.patches)]
+                        _, set_func, convert_func = get_key_weight(self.model, weight_key)
+                        m.weight_function = [LowVramPatch(weight_key, self.patches, convert_func, set_func)]
                         patch_counter += 1
                 if bias_key in self.patches:
                     if force_patch_weights:
                         self.patch_weight_to_device(bias_key)
                     else:
-                        m.bias_function = [LowVramPatch(bias_key, self.patches)]
+                        _, set_func, convert_func = get_key_weight(self.model, bias_key)
+                        m.bias_function = [LowVramPatch(bias_key, self.patches, convert_func, set_func)]
                         patch_counter += 1

                 cast_weight = True
@@ -825,10 +841,12 @@ class ModelPatcher:
                 module_mem += move_weight_functions(m, device_to)
                 if lowvram_possible:
                     if weight_key in self.patches:
-                        m.weight_function.append(LowVramPatch(weight_key, self.patches))
+                        _, set_func, convert_func = get_key_weight(self.model, weight_key)
+                        m.weight_function.append(LowVramPatch(weight_key, self.patches, convert_func, set_func))
                         patch_counter += 1
                     if bias_key in self.patches:
-                        m.bias_function.append(LowVramPatch(bias_key, self.patches))
+                        _, set_func, convert_func = get_key_weight(self.model, bias_key)
+                        m.bias_function.append(LowVramPatch(bias_key, self.patches, convert_func, set_func))
                         patch_counter += 1
                     cast_weight = True
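Both patch paths now look up per-key conversion hooks at patch time and unpack them as `_, set_func, convert_func`. A hedged sketch of what a get_key_weight-style lookup might do (the hook attribute names here are hypothetical; the real function is the one whose first lines appear in the hunk above):

# Hedged sketch of a get_key_weight-style lookup (hook names hypothetical).
import torch

def get_key_weight(model, key):
    module_path, attr = key.rsplit(".", 1)
    module = model.get_submodule(module_path)
    weight = getattr(module, attr)
    # Quantized ops can expose per-attribute hooks, e.g. set_weight/convert_weight.
    set_func = getattr(module, f"set_{attr}", None)
    convert_func = getattr(module, f"convert_{attr}", None)
    return weight, set_func, convert_func

net = torch.nn.Sequential(torch.nn.Linear(4, 4))
w, set_func, convert_func = get_key_weight(net, "0.weight")
print(w.shape, set_func, convert_func)  # a plain Linear has no hooks -> None, None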
@@ -21,17 +21,23 @@ def rescale_zero_terminal_snr_sigmas(sigmas):
     alphas_bar[-1] = 4.8973451890853435e-08
     return ((1 - alphas_bar) / alphas_bar) ** 0.5

+def reshape_sigma(sigma, noise_dim):
+    if sigma.nelement() == 1:
+        return sigma.view(())
+    else:
+        return sigma.view(sigma.shape[:1] + (1,) * (noise_dim - 1))
+
 class EPS:
     def calculate_input(self, sigma, noise):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+        sigma = reshape_sigma(sigma, noise.ndim)
         return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

     def calculate_denoised(self, sigma, model_output, model_input):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        sigma = reshape_sigma(sigma, model_output.ndim)
         return model_input - model_output * sigma

     def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+        sigma = reshape_sigma(sigma, noise.ndim)
         if max_denoise:
             noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
         else:
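reshape_sigma centralizes the broadcast logic that every sampling class previously inlined: a single-element sigma becomes a 0-d scalar (which broadcasts against any noise shape), while a per-batch sigma becomes [B, 1, ..., 1]. A quick check of both cases:

import torch

def reshape_sigma(sigma, noise_dim):
    if sigma.nelement() == 1:
        return sigma.view(())                                     # 0-d scalar
    return sigma.view(sigma.shape[:1] + (1,) * (noise_dim - 1))  # [B, 1, 1, 1]

noise = torch.randn(2, 4, 64, 64)
print(reshape_sigma(torch.tensor([0.5]), noise.ndim).shape)       # torch.Size([])
print(reshape_sigma(torch.tensor([0.5, 0.7]), noise.ndim).shape)  # torch.Size([2, 1, 1, 1])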
@@ -45,12 +51,12 @@ class EPS:

 class V_PREDICTION(EPS):
     def calculate_denoised(self, sigma, model_output, model_input):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        sigma = reshape_sigma(sigma, model_output.ndim)
         return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

 class EDM(V_PREDICTION):
     def calculate_denoised(self, sigma, model_output, model_input):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        sigma = reshape_sigma(sigma, model_output.ndim)
         return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

 class CONST:
@@ -58,15 +64,15 @@ class CONST:
         return noise

     def calculate_denoised(self, sigma, model_output, model_input):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        sigma = reshape_sigma(sigma, model_output.ndim)
         return model_input - model_output * sigma

     def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+        sigma = reshape_sigma(sigma, noise.ndim)
         return sigma * noise + (1.0 - sigma) * latent_image

     def inverse_noise_scaling(self, sigma, latent):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (latent.ndim - 1))
+        sigma = reshape_sigma(sigma, latent.ndim)
         return latent / (1.0 - sigma)

 class X0(EPS):
@@ -80,16 +86,16 @@ class IMG_TO_IMG(X0):

 class COSMOS_RFLOW:
     def calculate_input(self, sigma, noise):
         sigma = (sigma / (sigma + 1))
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+        sigma = reshape_sigma(sigma, noise.ndim)
         return noise * (1.0 - sigma)

     def calculate_denoised(self, sigma, model_output, model_input):
         sigma = (sigma / (sigma + 1))
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        sigma = reshape_sigma(sigma, model_output.ndim)
         return model_input * (1.0 - sigma) - model_output * sigma

     def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
-        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+        sigma = reshape_sigma(sigma, noise.ndim)
         noise = noise * sigma
         noise += latent_image
         return noise
@@ -416,8 +416,10 @@ def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None
             else:
                 return weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype)

-        def set_weight(self, weight, inplace_update=False, seed=None, **kwargs):
+        def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs):
             weight = comfy.float.stochastic_rounding(weight / self.scale_weight.to(device=weight.device, dtype=weight.dtype), self.weight.dtype, seed=seed)
+            if return_weight:
+                return weight
             if inplace_update:
                 self.weight.data.copy_(weight)
             else:
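With return_weight=True, set_weight quantizes the patched tensor back down and hands it to the caller instead of writing it into the module, which is exactly what LowVramPatch.set_func relies on. The quantization step uses stochastic rounding; an integer toy version of the idea behind comfy.float.stochastic_rounding (not its actual implementation):

import torch

def stochastic_round(x, seed=0):
    # Round down, then add 1 with probability equal to the fractional part,
    # so the rounding is unbiased in expectation (integer toy version).
    g = torch.Generator().manual_seed(seed)
    floor = x.floor()
    frac = x - floor
    return floor + (torch.rand(x.shape, generator=g) < frac)

x = torch.full((100000,), 0.25)
print(stochastic_round(x).mean())  # ~0.25: unbiased, unlike plain rounding to 0.0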
@@ -8,8 +8,8 @@ from comfy_api.internal.async_to_sync import create_sync_class
 from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput
 from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents
 from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents
-from comfy_api.latest._io import _IO as io #noqa: F401
-from comfy_api.latest._ui import _UI as ui #noqa: F401
+from . import _io as io
+from . import _ui as ui
 # from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401
 from comfy_execution.utils import get_executing_context
 from comfy_execution.progress import get_progress_state, PreviewImageTuple
@@ -114,6 +114,8 @@ if TYPE_CHECKING:
     ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub]
 ComfyAPISync = create_sync_class(ComfyAPI_latest)

+comfy_io = io  # create the new alias for io
+
 __all__ = [
     "ComfyAPI",
     "ComfyAPISync",
@@ -121,4 +123,7 @@ __all__ = [
     "InputImpl",
    "Types",
     "ComfyExtension",
+    "io",
+    "comfy_io",
+    "ui",
 ]
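The comfy_io alias gives node packs a stable second name for the v3 IO namespace; the hunks above make both names importable from comfy_api.latest. A usage sketch:

# Both spellings resolve to the same module after this change.
from comfy_api.latest import io, comfy_io

assert comfy_io is io
model_combo = comfy_io.Combo.Input("model", options=["a", "b"], default="a")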
@@ -1,6 +1,6 @@
 from __future__ import annotations
 from abc import ABC, abstractmethod
-from typing import Optional, Union
+from typing import Optional, Union, IO
 import io
 import av
 from comfy_api.util import VideoContainer, VideoCodec, VideoComponents
@@ -23,7 +23,7 @@ class VideoInput(ABC):
     @abstractmethod
     def save_to(
         self,
-        path: str,
+        path: Union[str, IO[bytes]],
         format: VideoContainer = VideoContainer.AUTO,
         codec: VideoCodec = VideoCodec.AUTO,
         metadata: Optional[dict] = None
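Widening path to Union[str, IO[bytes]] lets callers serialize a video into memory instead of the filesystem. A sketch, assuming a VideoInput instance named video and the MP4/H264 members of the enums imported above:

import io
from comfy_api.util import VideoContainer, VideoCodec

buffer = io.BytesIO()
video.save_to(buffer, format=VideoContainer.MP4, codec=VideoCodec.H264)
buffer.seek(0)  # rewind before handing the bytes to an uploader
mp4_bytes = buffer.read()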
@@ -336,11 +336,25 @@ class Combo(ComfyTypeIO):
     class Input(WidgetInput):
         """Combo input (dropdown)."""
         Type = str
-        def __init__(self, id: str, options: list[str]=None, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
-                    default: str=None, control_after_generate: bool=None,
-                    upload: UploadType=None, image_folder: FolderType=None,
-                    remote: RemoteOptions=None,
-                    socketless: bool=None):
+        def __init__(
+            self,
+            id: str,
+            options: list[str] | list[int] | type[Enum] = None,
+            display_name: str=None,
+            optional=False,
+            tooltip: str=None,
+            lazy: bool=None,
+            default: str | int | Enum = None,
+            control_after_generate: bool=None,
+            upload: UploadType=None,
+            image_folder: FolderType=None,
+            remote: RemoteOptions=None,
+            socketless: bool=None,
+        ):
+            if isinstance(options, type) and issubclass(options, Enum):
+                options = [v.value for v in options]
+            if isinstance(default, Enum):
+                default = default.value
             super().__init__(id, display_name, optional, tooltip, lazy, default, socketless)
             self.multiselect = False
             self.options = options
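Combo.Input now accepts an Enum class directly and unwraps it to its values, which is what all the API-node call sites later in this diff switch to. A usage sketch:

from enum import Enum
from comfy_api.latest import comfy_io

class ModelName(str, Enum):
    fast = "model-fast"
    quality = "model-quality"

# Before: options=[m.value for m in ModelName], default=ModelName.fast.value
# After: pass the Enum class and member; Input unwraps both to plain values.
inp = comfy_io.Combo.Input("model", options=ModelName, default=ModelName.fast)
assert inp.options == ["model-fast", "model-quality"]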
@@ -1568,78 +1582,78 @@ class _UIOutput(ABC):
     ...


-class _IO:
-    FolderType = FolderType
-    UploadType = UploadType
-    RemoteOptions = RemoteOptions
-    NumberDisplay = NumberDisplay
+__all__ = [
+    "FolderType",
+    "UploadType",
+    "RemoteOptions",
+    "NumberDisplay",

-    comfytype = staticmethod(comfytype)
-    Custom = staticmethod(Custom)
-    Input = Input
-    WidgetInput = WidgetInput
-    Output = Output
-    ComfyTypeI = ComfyTypeI
-    ComfyTypeIO = ComfyTypeIO
-    #---------------------------------
+    "comfytype",
+    "Custom",
+    "Input",
+    "WidgetInput",
+    "Output",
+    "ComfyTypeI",
+    "ComfyTypeIO",
     # Supported Types
-    Boolean = Boolean
-    Int = Int
-    Float = Float
-    String = String
-    Combo = Combo
-    MultiCombo = MultiCombo
-    Image = Image
-    WanCameraEmbedding = WanCameraEmbedding
-    Webcam = Webcam
-    Mask = Mask
-    Latent = Latent
-    Conditioning = Conditioning
-    Sampler = Sampler
-    Sigmas = Sigmas
-    Noise = Noise
-    Guider = Guider
-    Clip = Clip
-    ControlNet = ControlNet
-    Vae = Vae
-    Model = Model
-    ClipVision = ClipVision
-    ClipVisionOutput = ClipVisionOutput
-    AudioEncoder = AudioEncoder
-    AudioEncoderOutput = AudioEncoderOutput
-    StyleModel = StyleModel
-    Gligen = Gligen
-    UpscaleModel = UpscaleModel
-    Audio = Audio
-    Video = Video
-    SVG = SVG
-    LoraModel = LoraModel
-    LossMap = LossMap
-    Voxel = Voxel
-    Mesh = Mesh
-    Hooks = Hooks
-    HookKeyframes = HookKeyframes
-    TimestepsRange = TimestepsRange
-    LatentOperation = LatentOperation
-    FlowControl = FlowControl
-    Accumulation = Accumulation
-    Load3DCamera = Load3DCamera
-    Load3D = Load3D
-    Load3DAnimation = Load3DAnimation
-    Photomaker = Photomaker
-    Point = Point
-    FaceAnalysis = FaceAnalysis
-    BBOX = BBOX
-    SEGS = SEGS
-    AnyType = AnyType
-    MultiType = MultiType
-    #---------------------------------
-    HiddenHolder = HiddenHolder
-    Hidden = Hidden
-    NodeInfoV1 = NodeInfoV1
-    NodeInfoV3 = NodeInfoV3
-    Schema = Schema
-    ComfyNode = ComfyNode
-    NodeOutput = NodeOutput
-    add_to_dict_v1 = staticmethod(add_to_dict_v1)
-    add_to_dict_v3 = staticmethod(add_to_dict_v3)
+    "Boolean",
+    "Int",
+    "Float",
+    "String",
+    "Combo",
+    "MultiCombo",
+    "Image",
+    "WanCameraEmbedding",
+    "Webcam",
+    "Mask",
+    "Latent",
+    "Conditioning",
+    "Sampler",
+    "Sigmas",
+    "Noise",
+    "Guider",
+    "Clip",
+    "ControlNet",
+    "Vae",
+    "Model",
+    "ClipVision",
+    "ClipVisionOutput",
+    "AudioEncoder",
+    "AudioEncoderOutput",
+    "StyleModel",
+    "Gligen",
+    "UpscaleModel",
+    "Audio",
+    "Video",
+    "SVG",
+    "LoraModel",
+    "LossMap",
+    "Voxel",
+    "Mesh",
+    "Hooks",
+    "HookKeyframes",
+    "TimestepsRange",
+    "LatentOperation",
+    "FlowControl",
+    "Accumulation",
+    "Load3DCamera",
+    "Load3D",
+    "Load3DAnimation",
+    "Photomaker",
+    "Point",
+    "FaceAnalysis",
+    "BBOX",
+    "SEGS",
+    "AnyType",
+    "MultiType",
+    # Other classes
+    "HiddenHolder",
+    "Hidden",
+    "NodeInfoV1",
+    "NodeInfoV3",
+    "Schema",
+    "ComfyNode",
+    "NodeOutput",
+    "add_to_dict_v1",
+    "add_to_dict_v3",
+]
@@ -449,15 +449,16 @@ class PreviewText(_UIOutput):
         return {"text": (self.value,)}


-class _UI:
-    SavedResult = SavedResult
-    SavedImages = SavedImages
-    SavedAudios = SavedAudios
-    ImageSaveHelper = ImageSaveHelper
-    AudioSaveHelper = AudioSaveHelper
-    PreviewImage = PreviewImage
-    PreviewMask = PreviewMask
-    PreviewAudio = PreviewAudio
-    PreviewVideo = PreviewVideo
-    PreviewUI3D = PreviewUI3D
-    PreviewText = PreviewText
+__all__ = [
+    "SavedResult",
+    "SavedImages",
+    "SavedAudios",
+    "ImageSaveHelper",
+    "AudioSaveHelper",
+    "PreviewImage",
+    "PreviewMask",
+    "PreviewAudio",
+    "PreviewVideo",
+    "PreviewUI3D",
+    "PreviewText",
+]
@@ -269,7 +269,7 @@ def tensor_to_bytesio(
         mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').

     Returns:
-        Named BytesIO object containing the image data.
+        Named BytesIO object containing the image data, with pointer set to the start of buffer.
     """
     if not mime_type:
         mime_type = "image/png"
@@ -431,7 +431,7 @@ async def upload_video_to_comfyapi(
                 f"Video duration ({actual_duration:.2f}s) exceeds the maximum allowed ({max_duration}s)."
             )
         except Exception as e:
-            logging.error(f"Error getting video duration: {e}")
+            logging.error("Error getting video duration: %s", str(e))
             raise ValueError(f"Could not verify video duration from source: {e}") from e

     upload_mime_type = f"video/{container.value.lower()}"
@@ -98,7 +98,7 @@ import io
 import os
 import socket
 from aiohttp.client_exceptions import ClientError, ClientResponseError
-from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple
+from typing import Type, Optional, Any, TypeVar, Generic, Callable
 from enum import Enum
 import json
 from urllib.parse import urljoin, urlparse
@@ -175,7 +175,7 @@ class ApiClient:
         max_retries: int = 3,
         retry_delay: float = 1.0,
         retry_backoff_factor: float = 2.0,
-        retry_status_codes: Optional[Tuple[int, ...]] = None,
+        retry_status_codes: Optional[tuple[int, ...]] = None,
         session: Optional[aiohttp.ClientSession] = None,
     ):
         self.base_url = base_url
@@ -199,9 +199,9 @@ class ApiClient:

     @staticmethod
     def _create_json_payload_args(
-        data: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-    ) -> Dict[str, Any]:
+        data: Optional[dict[str, Any]] = None,
+        headers: Optional[dict[str, str]] = None,
+    ) -> dict[str, Any]:
         return {
             "json": data,
             "headers": headers,
@@ -209,11 +209,11 @@ class ApiClient:

     def _create_form_data_args(
         self,
-        data: Dict[str, Any] | None,
-        files: Dict[str, Any] | None,
-        headers: Optional[Dict[str, str]] = None,
+        data: dict[str, Any] | None,
+        files: dict[str, Any] | None,
+        headers: Optional[dict[str, str]] = None,
         multipart_parser: Callable | None = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if headers and "Content-Type" in headers:
             del headers["Content-Type"]
@@ -254,9 +254,9 @@ class ApiClient:

     @staticmethod
     def _create_urlencoded_form_data_args(
-        data: Dict[str, Any],
-        headers: Optional[Dict[str, str]] = None,
-    ) -> Dict[str, Any]:
+        data: dict[str, Any],
+        headers: Optional[dict[str, str]] = None,
+    ) -> dict[str, Any]:
         headers = headers or {}
         headers["Content-Type"] = "application/x-www-form-urlencoded"
         return {
@@ -264,7 +264,7 @@ class ApiClient:
             "headers": headers,
         }

-    def get_headers(self) -> Dict[str, str]:
+    def get_headers(self) -> dict[str, str]:
         """Get headers for API requests, including authentication if available"""
         headers = {"Content-Type": "application/json", "Accept": "application/json"}
@@ -275,7 +275,7 @@ class ApiClient:

         return headers

-    async def _check_connectivity(self, target_url: str) -> Dict[str, bool]:
+    async def _check_connectivity(self, target_url: str) -> dict[str, bool]:
         """
         Check connectivity to determine if network issues are local or server-related.
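The sweep from typing.Dict/Tuple to the builtin dict/tuple generics is safe because the builtin forms are subscriptable at runtime on Python 3.9+, and drops two now-unused imports. A minimal illustration of the modernized style:

from typing import Optional

# Python 3.9+: builtin generics replace typing.Dict / typing.Tuple.
def get_headers() -> dict[str, str]:
    return {"Content-Type": "application/json"}

# Illustrative values only; the real default comes from the ApiClient config.
retry_status_codes: Optional[tuple[int, ...]] = (429, 502, 503, 504)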
@@ -316,14 +316,14 @@ class ApiClient:
         self,
         method: str,
         path: str,
-        params: Optional[Dict[str, Any]] = None,
-        data: Optional[Dict[str, Any]] = None,
-        files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None,
-        headers: Optional[Dict[str, str]] = None,
+        params: Optional[dict[str, Any]] = None,
+        data: Optional[dict[str, Any]] = None,
+        files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None,
+        headers: Optional[dict[str, str]] = None,
         content_type: str = "application/json",
         multipart_parser: Callable | None = None,
         retry_count: int = 0,  # Used internally for tracking retries
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Make an HTTP request to the API with automatic retries for transient errors.
@@ -359,10 +359,10 @@ class ApiClient:
         if params:
             params = {k: v for k, v in params.items() if v is not None}  # aiohttp fails to serialize None values

-        logging.debug(f"[DEBUG] Request Headers: {request_headers}")
-        logging.debug(f"[DEBUG] Files: {files}")
-        logging.debug(f"[DEBUG] Params: {params}")
-        logging.debug(f"[DEBUG] Data: {data}")
+        logging.debug("[DEBUG] Request Headers: %s", request_headers)
+        logging.debug("[DEBUG] Files: %s", files)
+        logging.debug("[DEBUG] Params: %s", params)
+        logging.debug("[DEBUG] Data: %s", data)

         if content_type == "application/x-www-form-urlencoded":
             payload_args = self._create_urlencoded_form_data_args(data or {}, request_headers)
@@ -485,7 +485,7 @@ class ApiClient:
             retry_delay: Initial delay between retries in seconds
             retry_backoff_factor: Multiplier for the delay after each retry
         """
-        headers: Dict[str, str] = {}
+        headers: dict[str, str] = {}
         skip_auto_headers: set[str] = set()
         if content_type:
             headers["Content-Type"] = content_type
@@ -558,7 +558,7 @@ class ApiClient:
         *req_meta,
         retry_count: int,
         response_content: dict | str = "",
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         status_code = exc.status
         if status_code == 401:
             user_friendly = "Unauthorized: Please login first to use this node."
@@ -592,9 +592,9 @@ class ApiClient:
             error_message=f"HTTP Error {exc.status}",
         )

-        logging.debug(f"[DEBUG] API Error: {user_friendly} (Status: {status_code})")
+        logging.debug("[DEBUG] API Error: %s (Status: %s)", user_friendly, status_code)
         if response_content:
-            logging.debug(f"[DEBUG] Response content: {response_content}")
+            logging.debug("[DEBUG] Response content: %s", response_content)

         # Retry if eligible
         if status_code in self.retry_status_codes and retry_count < self.max_retries:
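The recurring f-string to %s change matters for debug logging: with lazy %-formatting, the message (and any expensive __repr__, such as json.dumps of a large payload) is only built when the DEBUG level is actually enabled. A quick demonstration:

import logging

class Expensive:
    def __repr__(self):
        print("formatting happened!")
        return "<payload>"

logging.basicConfig(level=logging.INFO)    # DEBUG disabled
logging.debug(f"payload: {Expensive()}")   # f-string: formats eagerly, prints the side effect
logging.debug("payload: %s", Expensive())  # lazy: repr never called, nothing printed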
@@ -659,7 +659,7 @@ class ApiEndpoint(Generic[T, R]):
         method: HttpMethod,
         request_model: Type[T],
         response_model: Type[R],
-        query_params: Optional[Dict[str, Any]] = None,
+        query_params: Optional[dict[str, Any]] = None,
     ):
         """Initialize an API endpoint definition.
@@ -684,11 +684,11 @@ class SynchronousOperation(Generic[T, R]):
         self,
         endpoint: ApiEndpoint[T, R],
         request: T,
-        files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None,
+        files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None,
         api_base: str | None = None,
         auth_token: Optional[str] = None,
         comfy_api_key: Optional[str] = None,
-        auth_kwargs: Optional[Dict[str, str]] = None,
+        auth_kwargs: Optional[dict[str, str]] = None,
         timeout: float = 7200.0,
         verify_ssl: bool = True,
         content_type: str = "application/json",
@@ -729,7 +729,7 @@ class SynchronousOperation(Generic[T, R]):
             )

             try:
-                request_dict: Optional[Dict[str, Any]]
+                request_dict: Optional[dict[str, Any]]
                 if isinstance(self.request, EmptyRequest):
                     request_dict = None
                 else:
@@ -738,11 +738,9 @@ class SynchronousOperation(Generic[T, R]):
                     if isinstance(v, Enum):
                         request_dict[k] = v.value

-            logging.debug(
-                f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}"
-            )
-            logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}")
-            logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}")
+            logging.debug("[DEBUG] API Request: %s %s", self.endpoint.method.value, self.endpoint.path)
+            logging.debug("[DEBUG] Request Data: %s", json.dumps(request_dict, indent=2))
+            logging.debug("[DEBUG] Query Params: %s", self.endpoint.query_params)

             response_json = await client.request(
                 self.endpoint.method.value,
@@ -757,11 +755,11 @@ class SynchronousOperation(Generic[T, R]):
             logging.debug("=" * 50)
             logging.debug("[DEBUG] RESPONSE DETAILS:")
             logging.debug("[DEBUG] Status Code: 200 (Success)")
-            logging.debug(f"[DEBUG] Response Body: {json.dumps(response_json, indent=2)}")
+            logging.debug("[DEBUG] Response Body: %s", json.dumps(response_json, indent=2))
             logging.debug("=" * 50)

             parsed_response = self.endpoint.response_model.model_validate(response_json)
-            logging.debug(f"[DEBUG] Parsed Response: {parsed_response}")
+            logging.debug("[DEBUG] Parsed Response: %s", parsed_response)
             return parsed_response
         finally:
             if owns_client:
@@ -784,14 +782,14 @@ class PollingOperation(Generic[T, R]):
         poll_endpoint: ApiEndpoint[EmptyRequest, R],
         completed_statuses: list[str],
         failed_statuses: list[str],
-        status_extractor: Callable[[R], str],
-        progress_extractor: Callable[[R], float] | None = None,
-        result_url_extractor: Callable[[R], str] | None = None,
+        status_extractor: Callable[[R], Optional[str]],
+        progress_extractor: Callable[[R], Optional[float]] | None = None,
+        result_url_extractor: Callable[[R], Optional[str]] | None = None,
         request: Optional[T] = None,
         api_base: str | None = None,
         auth_token: Optional[str] = None,
         comfy_api_key: Optional[str] = None,
-        auth_kwargs: Optional[Dict[str, str]] = None,
+        auth_kwargs: Optional[dict[str, str]] = None,
         poll_interval: float = 5.0,
         max_poll_attempts: int = 120,  # Default max polling attempts (10 minutes with 5s interval)
         max_retries: int = 3,  # Max retries per individual API call
@@ -877,7 +875,7 @@ class PollingOperation(Generic[T, R]):
         status = TaskStatus.PENDING
         for poll_count in range(1, self.max_poll_attempts + 1):
             try:
-                logging.debug(f"[DEBUG] Polling attempt #{poll_count}")
+                logging.debug("[DEBUG] Polling attempt #%s", poll_count)

                 request_dict = (
                     None if self.request is None else self.request.model_dump(exclude_none=True)
@@ -885,10 +883,13 @@ class PollingOperation(Generic[T, R]):

                 if poll_count == 1:
                     logging.debug(
-                        f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}"
+                        "[DEBUG] Poll Request: %s %s",
+                        self.poll_endpoint.method.value,
+                        self.poll_endpoint.path,
                     )
                     logging.debug(
-                        f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}"
+                        "[DEBUG] Poll Request Data: %s",
+                        json.dumps(request_dict, indent=2) if request_dict else "None",
                     )

                 # Query task status
@@ -903,7 +904,7 @@ class PollingOperation(Generic[T, R]):

                 # Check if task is complete
                 status = self._check_task_status(response_obj)
-                logging.debug(f"[DEBUG] Task Status: {status}")
+                logging.debug("[DEBUG] Task Status: %s", status)

                 # If progress extractor is provided, extract progress
                 if self.progress_extractor:
@@ -917,7 +918,7 @@ class PollingOperation(Generic[T, R]):
                     result_url = self.result_url_extractor(response_obj)
                     if result_url:
                         message = f"Result URL: {result_url}"
-                        logging.debug(f"[DEBUG] {message}")
+                        logging.debug("[DEBUG] %s", message)
                         self._display_text_on_node(message)
                     self.final_response = response_obj
                     if self.progress_extractor:
@@ -925,7 +926,7 @@ class PollingOperation(Generic[T, R]):
                     return self.final_response
                 if status == TaskStatus.FAILED:
                     message = f"Task failed: {json.dumps(resp)}"
-                    logging.error(f"[DEBUG] {message}")
+                    logging.error("[DEBUG] %s", message)
                     raise Exception(message)
                 logging.debug("[DEBUG] Task still pending, continuing to poll...")
                 # Task pending – wait
@@ -939,7 +940,12 @@ class PollingOperation(Generic[T, R]):
                     raise Exception(
                         f"Polling aborted after {consecutive_errors} network errors: {str(e)}"
                     ) from e
-                logging.warning("Network error (%s/%s): %s", consecutive_errors, max_consecutive_errors, str(e))
+                logging.warning(
+                    "Network error (%s/%s): %s",
+                    consecutive_errors,
+                    max_consecutive_errors,
+                    str(e),
+                )
                 await asyncio.sleep(self.poll_interval)
             except Exception as e:
                 # For other errors, increment count and potentially abort
@@ -949,10 +955,13 @@ class PollingOperation(Generic[T, R]):
                         f"Polling aborted after {consecutive_errors} consecutive errors: {str(e)}"
                     ) from e

-                logging.error(f"[DEBUG] Polling error: {str(e)}")
+                logging.error("[DEBUG] Polling error: %s", str(e))
                 logging.warning(
-                    f"Error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. "
-                    f"Will retry in {self.poll_interval} seconds."
+                    "Error during polling (attempt %s/%s): %s. Will retry in %s seconds.",
+                    poll_count,
+                    self.max_poll_attempts,
+                    str(e),
+                    self.poll_interval,
                 )
                 await asyncio.sleep(self.poll_interval)
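The extractor signatures now admit None, matching call sites (such as the Pika node later in this diff) that return None while a task has no status, progress, or URL yet. A hedged sketch of wiring a PollingOperation; the endpoint path and response model are placeholders:

async def wait_for_task(auth):
    # Sketch only: placeholder path, hypothetical pydantic response model.
    op = PollingOperation(
        poll_endpoint=ApiEndpoint(
            path="/proxy/example/tasks/123",
            method=HttpMethod.GET,
            request_model=EmptyRequest,
            response_model=ExampleTaskResponse,  # hypothetical model
        ),
        completed_statuses=["finished"],
        failed_statuses=["failed", "cancelled"],
        status_extractor=lambda r: r.status or None,  # extractors may return None now
        progress_extractor=lambda r: getattr(r, "progress", None),
        result_url_extractor=lambda r: getattr(r, "url", None),
        auth_kwargs=auth,
    )
    return await op.execute()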
comfy_api_nodes/apis/pika_defs.py (new file, 100 lines)
@@ -0,0 +1,100 @@
+from typing import Optional
+from enum import Enum
+from pydantic import BaseModel, Field
+
+
+class Pikaffect(str, Enum):
+    Cake_ify = "Cake-ify"
+    Crumble = "Crumble"
+    Crush = "Crush"
+    Decapitate = "Decapitate"
+    Deflate = "Deflate"
+    Dissolve = "Dissolve"
+    Explode = "Explode"
+    Eye_pop = "Eye-pop"
+    Inflate = "Inflate"
+    Levitate = "Levitate"
+    Melt = "Melt"
+    Peel = "Peel"
+    Poke = "Poke"
+    Squish = "Squish"
+    Ta_da = "Ta-da"
+    Tear = "Tear"
+
+
+class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel):
+    aspectRatio: Optional[float] = Field(None, description='Aspect ratio (width / height)')
+    duration: Optional[int] = Field(5)
+    ingredientsMode: str = Field(...)
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    resolution: Optional[str] = Field('1080p')
+    seed: Optional[int] = Field(None)
+
+
+class PikaGenerateResponse(BaseModel):
+    video_id: str = Field(...)
+
+
+class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel):
+    duration: Optional[int] = 5
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel):
+    duration: Optional[int] = Field(None, ge=5, le=10)
+    negativePrompt: Optional[str] = Field(None)
+    promptText: str = Field(...)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel):
+    aspectRatio: Optional[float] = Field(
+        1.7777777777777777,
+        description='Aspect ratio (width / height)',
+        ge=0.4,
+        le=2.5,
+    )
+    duration: Optional[int] = 5
+    negativePrompt: Optional[str] = Field(None)
+    promptText: str = Field(...)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    pikaffect: Optional[str] = None
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+    modifyRegionRoi: Optional[str] = Field(None)
+
+
+class PikaStatusEnum(str, Enum):
+    queued = "queued"
+    started = "started"
+    finished = "finished"
+    failed = "failed"
+
+
+class PikaVideoResponse(BaseModel):
+    id: str = Field(...)
+    progress: Optional[int] = Field(None)
+    status: PikaStatusEnum
+    url: Optional[str] = Field(None)
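These request bodies are plain pydantic models; a node builds one and the client serializes it with model_dump before sending. A usage sketch:

from comfy_api_nodes.apis.pika_defs import PikaBodyGenerate22T2vGenerate22T2vPost

body = PikaBodyGenerate22T2vGenerate22T2vPost(
    promptText="a corgi surfing at sunset",
    duration=5,
    resolution="1080p",
    seed=42,
)
# exclude_none=True keeps unset optional fields like negativePrompt out of the payload.
print(body.model_dump(exclude_none=True))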
@@ -21,7 +21,7 @@ def get_log_directory():
     try:
         os.makedirs(log_dir, exist_ok=True)
     except Exception as e:
-        logger.error(f"Error creating API log directory {log_dir}: {e}")
+        logger.error("Error creating API log directory %s: %s", log_dir, str(e))
         # Fallback to base temp directory if sub-directory creation fails
         return base_temp_dir
     return log_dir
@@ -122,9 +122,9 @@ def log_request_response(
     try:
         with open(filepath, "w", encoding="utf-8") as f:
             f.write("\n".join(log_content))
-        logger.debug(f"API log saved to: {filepath}")
+        logger.debug("API log saved to: %s", filepath)
     except Exception as e:
-        logger.error(f"Error writing API log to {filepath}: {e}")
+        logger.error("Error writing API log to %s: %s", filepath, str(e))


 if __name__ == '__main__':
@@ -249,8 +249,8 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
             inputs=[
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in Text2ImageModelName],
-                    default=Text2ImageModelName.seedream_3.value,
+                    options=Text2ImageModelName,
+                    default=Text2ImageModelName.seedream_3,
                     tooltip="Model name",
                 ),
                 comfy_io.String.Input(
@@ -382,8 +382,8 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode):
             inputs=[
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in Image2ImageModelName],
-                    default=Image2ImageModelName.seededit_3.value,
+                    options=Image2ImageModelName,
+                    default=Image2ImageModelName.seededit_3,
                     tooltip="Model name",
                 ),
                 comfy_io.Image.Input(
@@ -676,8 +676,8 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode):
             inputs=[
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in Text2VideoModelName],
-                    default=Text2VideoModelName.seedance_1_pro.value,
+                    options=Text2VideoModelName,
+                    default=Text2VideoModelName.seedance_1_pro,
                     tooltip="Model name",
                 ),
                 comfy_io.String.Input(
@@ -793,8 +793,8 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode):
             inputs=[
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in Image2VideoModelName],
-                    default=Image2VideoModelName.seedance_1_pro.value,
+                    options=Image2VideoModelName,
+                    default=Image2VideoModelName.seedance_1_pro,
                     tooltip="Model name",
                 ),
                 comfy_io.String.Input(
@@ -296,7 +296,7 @@ def validate_video_result_response(response) -> None:
     """Validates that the Kling task result contains a video."""
     if not is_valid_video_response(response):
         error_msg = f"Kling task {response.data.task_id} succeeded but no video data found in response."
-        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        logging.error("Error: %s.\nResponse: %s", error_msg, response)
         raise Exception(error_msg)
@@ -304,7 +304,7 @@ def validate_image_result_response(response) -> None:
     """Validates that the Kling task result contains an image."""
     if not is_valid_image_response(response):
         error_msg = f"Kling task {response.data.task_id} succeeded but no image data found in response."
-        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        logging.error("Error: %s.\nResponse: %s", error_msg, response)
         raise Exception(error_msg)
@@ -647,7 +647,7 @@ class KlingCameraControls(comfy_io.ComfyNode):
             category="api node/video/Kling",
             description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
             inputs=[
-                comfy_io.Combo.Input("camera_control_type", options=[i.value for i in KlingCameraControlType]),
+                comfy_io.Combo.Input("camera_control_type", options=KlingCameraControlType),
                 comfy_io.Float.Input(
                     "horizontal_movement",
                     default=0.0,
@@ -772,7 +772,7 @@ class KlingTextToVideoNode(comfy_io.ComfyNode):
                 comfy_io.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[i.value for i in KlingVideoGenAspectRatio],
+                    options=KlingVideoGenAspectRatio,
                     default="16:9",
                 ),
                 comfy_io.Combo.Input(
@@ -840,7 +840,7 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode):
                 comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[i.value for i in KlingVideoGenAspectRatio],
+                    options=KlingVideoGenAspectRatio,
                     default="16:9",
                 ),
                 comfy_io.Custom("CAMERA_CONTROL").Input(
@@ -903,17 +903,17 @@ class KlingImage2VideoNode(comfy_io.ComfyNode):
                 comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
                 comfy_io.Combo.Input(
                     "model_name",
-                    options=[i.value for i in KlingVideoGenModelName],
+                    options=KlingVideoGenModelName,
                     default="kling-v2-master",
                 ),
                 comfy_io.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0),
-                comfy_io.Combo.Input("mode", options=[i.value for i in KlingVideoGenMode], default="std"),
+                comfy_io.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[i.value for i in KlingVideoGenAspectRatio],
-                    default="16:9",
+                    options=KlingVideoGenAspectRatio,
+                    default=KlingVideoGenAspectRatio.field_16_9,
                 ),
-                comfy_io.Combo.Input("duration", options=[i.value for i in KlingVideoGenDuration], default="5"),
+                comfy_io.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5),
             ],
             outputs=[
                 comfy_io.Video.Output(),
@@ -984,8 +984,8 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode):
                 comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[i.value for i in KlingVideoGenAspectRatio],
-                    default="16:9",
+                    options=KlingVideoGenAspectRatio,
+                    default=KlingVideoGenAspectRatio.field_16_9,
                 ),
                 comfy_io.Custom("CAMERA_CONTROL").Input(
                     "camera_control",
@@ -181,11 +181,11 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
                 ),
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in LumaImageModel],
+                    options=LumaImageModel,
                 ),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[ratio.value for ratio in LumaAspectRatio],
+                    options=LumaAspectRatio,
                     default=LumaAspectRatio.ratio_16_9,
                 ),
                 comfy_io.Int.Input(
@@ -366,7 +366,7 @@ class LumaImageModifyNode(comfy_io.ComfyNode):
                 ),
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in LumaImageModel],
+                    options=LumaImageModel,
                 ),
                 comfy_io.Int.Input(
                     "seed",
@@ -466,21 +466,21 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
                 ),
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in LumaVideoModel],
+                    options=LumaVideoModel,
                 ),
                 comfy_io.Combo.Input(
                     "aspect_ratio",
-                    options=[ratio.value for ratio in LumaAspectRatio],
+                    options=LumaAspectRatio,
                     default=LumaAspectRatio.ratio_16_9,
                 ),
                 comfy_io.Combo.Input(
                     "resolution",
-                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    options=LumaVideoOutputResolution,
                     default=LumaVideoOutputResolution.res_540p,
                 ),
                 comfy_io.Combo.Input(
                     "duration",
-                    options=[dur.value for dur in LumaVideoModelOutputDuration],
+                    options=LumaVideoModelOutputDuration,
                 ),
                 comfy_io.Boolean.Input(
                     "loop",
@@ -595,7 +595,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
                 ),
                 comfy_io.Combo.Input(
                     "model",
-                    options=[model.value for model in LumaVideoModel],
+                    options=LumaVideoModel,
                 ),
                 # comfy_io.Combo.Input(
                 #     "aspect_ratio",
@@ -604,7 +604,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
                 # ),
                 comfy_io.Combo.Input(
                     "resolution",
-                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    options=LumaVideoOutputResolution,
                     default=LumaVideoOutputResolution.res_540p,
                 ),
                 comfy_io.Combo.Input(
@@ -500,7 +500,7 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
             raise Exception(
                 f"No video was found in the response. Full response: {file_result.model_dump()}"
             )
-        logging.info(f"Generated video URL: {file_url}")
+        logging.info("Generated video URL: %s", file_url)
         if cls.hidden.unique_id:
             if hasattr(file_result.file, "backup_download_url"):
                 message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}"
@@ -237,7 +237,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
         audio_stream = None

         for stream in input_container.streams:
-            logging.info(f"Found stream: type={stream.type}, class={type(stream)}")
+            logging.info("Found stream: type=%s, class=%s", stream.type, type(stream))
             if isinstance(stream, av.VideoStream):
                 # Create output video stream with same parameters
                 video_stream = output_container.add_stream(
@@ -247,7 +247,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
                 video_stream.height = stream.height
                 video_stream.pix_fmt = "yuv420p"
                 logging.info(
-                    f"Added video stream: {stream.width}x{stream.height} @ {stream.average_rate}fps"
+                    "Added video stream: %sx%s @ %sfps", stream.width, stream.height, stream.average_rate
                 )
             elif isinstance(stream, av.AudioStream):
                 # Create output audio stream with same parameters
@@ -256,9 +256,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
                 )
                 audio_stream.sample_rate = stream.sample_rate
                 audio_stream.layout = stream.layout
-                logging.info(
-                    f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels"
-                )
+                logging.info("Added audio stream: %sHz, %s channels", stream.sample_rate, stream.channels)

         # Calculate target frame count that's divisible by 16
         fps = input_container.streams.video[0].average_rate
@@ -288,9 +286,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
         for packet in video_stream.encode():
             output_container.mux(packet)

-        logging.info(
-            f"Encoded {frame_count} video frames (target: {target_frames})"
-        )
+        logging.info("Encoded %s video frames (target: %s)", frame_count, target_frames)

         # Decode and re-encode audio frames
         if audio_stream:
@@ -308,7 +304,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
             for packet in audio_stream.encode():
                 output_container.mux(packet)

-            logging.info(f"Encoded {audio_frame_count} audio frames")
+            logging.info("Encoded %s audio frames", audio_frame_count)

         # Close containers
         output_container.close()
@@ -335,7 +331,7 @@ def parse_width_height_from_res(resolution: str):
         "1:1 (1152 x 1152)": {"width": 1152, "height": 1152},
         "4:3 (1536 x 1152)": {"width": 1536, "height": 1152},
         "3:4 (1152 x 1536)": {"width": 1152, "height": 1536},
-        "21:9 (2560 x 1080)": {"width": 2560, "height": 1080},
+        # "21:9 (2560 x 1080)": {"width": 2560, "height": 1080},
     }
     return res_map.get(resolution, {"width": 1920, "height": 1080})
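trim_video targets a frame count divisible by 16, a common constraint for video models. A worked example of the arithmetic, assuming the target is a simple floor to the nearest multiple of 16 (the exact rounding used in the elided body is not shown in this diff):

from fractions import Fraction

fps = Fraction(24, 1)                     # e.g. a 24 fps source
duration_sec = 5.0
raw_frames = int(fps * duration_sec)      # 120
target_frames = (raw_frames // 16) * 16   # 112, divisible by 16
print(raw_frames, target_frames)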
@@ -388,11 +384,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
                 "negative_prompt",
                 multiline=True,
                 default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
-                "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
-                "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
-                "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
-                "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
-                "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                        "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                        "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                        "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                        "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                        "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
                 tooltip="Negative prompt text",
             ),
             comfy_io.Combo.Input(
@@ -403,14 +399,14 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
                     "1:1 (1152 x 1152)",
                     "4:3 (1536 x 1152)",
                     "3:4 (1152 x 1536)",
-                    "21:9 (2560 x 1080)",
+                    # "21:9 (2560 x 1080)",
                 ],
                 default="16:9 (1920 x 1080)",
                 tooltip="Resolution of the output video",
             ),
             comfy_io.Float.Input(
                 "prompt_adherence",
-                default=10.0,
+                default=4.5,
                 min=1.0,
                 max=20.0,
                 step=1.0,
@@ -424,10 +420,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
                 step=1,
                 display_mode=comfy_io.NumberDisplay.number,
                 tooltip="Random seed value",
+                control_after_generate=True,
             ),
             comfy_io.Int.Input(
                 "steps",
-                default=100,
+                default=33,
                 min=1,
                 max=100,
                 step=1,
@@ -468,7 +465,6 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
             steps=steps,
             seed=seed,
             guidance_scale=prompt_adherence,
-            num_frames=128,
             width=width_height["width"],
             height=width_height["height"],
             use_negative_prompts=True,
@@ -526,11 +522,11 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
                 "negative_prompt",
                 multiline=True,
                 default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
-                "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
-                "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
-                "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
-                "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
-                "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                        "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                        "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                        "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                        "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                        "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
                 tooltip="Negative prompt text",
             ),
             comfy_io.Int.Input(
@@ -546,7 +542,7 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
             comfy_io.Video.Input(
                 "video",
                 tooltip="The reference video used to generate the output video. Must be at least 5 seconds long. "
-                "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.",
+                        "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.",
             ),
             comfy_io.Combo.Input(
                 "control_type",
@@ -563,6 +559,15 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
                 tooltip="Only used if control_type is 'Motion Transfer'",
                 optional=True,
             ),
+            comfy_io.Int.Input(
+                "steps",
+                default=33,
+                min=1,
+                max=100,
+                step=1,
+                display_mode=comfy_io.NumberDisplay.number,
+                tooltip="Number of inference steps",
+            ),
         ],
         outputs=[comfy_io.Video.Output()],
         hidden=[
@@ -582,6 +587,8 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
         video: Optional[VideoInput] = None,
         control_type: str = "Motion Transfer",
         motion_intensity: Optional[int] = 100,
+        steps=33,
+        prompt_adherence=4.5,
     ) -> comfy_io.NodeOutput:
         auth = {
             "auth_token": cls.hidden.auth_token_comfy_org,
@@ -602,6 +609,8 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
             negative_prompt=negative_prompt,
             seed=seed,
             control_params=control_params,
+            steps=steps,
+            guidance_scale=prompt_adherence,
         )

         control = parse_control_parameter(control_type)
@@ -653,11 +662,11 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
                 "negative_prompt",
                 multiline=True,
                 default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
-                "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
-                "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
-                "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
-                "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
-                "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                        "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                        "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                        "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                        "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                        "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
                 tooltip="Negative prompt text",
             ),
             comfy_io.Combo.Input(
@@ -675,7 +684,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
             ),
             comfy_io.Float.Input(
                 "prompt_adherence",
-                default=10.0,
+                default=4.0,
                 min=1.0,
                 max=20.0,
                 step=1.0,
@@ -688,11 +697,12 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
                 max=4294967295,
                 step=1,
                 display_mode=comfy_io.NumberDisplay.number,
+                control_after_generate=True,
                 tooltip="Random seed value",
             ),
             comfy_io.Int.Input(
                 "steps",
-                default=100,
+                default=33,
                 min=1,
                 max=100,
                 step=1,
@@ -8,30 +8,17 @@ from __future__ import annotations
 from io import BytesIO
 import logging
 from typing import Optional, TypeVar
-from enum import Enum

 import numpy as np
 import torch

 from typing_extensions import override
-from comfy_api.latest import ComfyExtension, io as comfy_io
-from comfy_api.input_impl import VideoFromFile
+from comfy_api.latest import ComfyExtension, comfy_io
 from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput
 from comfy_api_nodes.apinode_utils import (
     download_url_to_video_output,
     tensor_to_bytesio,
 )
-from comfy_api_nodes.apis import (
-    PikaBodyGenerate22C2vGenerate22PikascenesPost,
-    PikaBodyGenerate22I2vGenerate22I2vPost,
-    PikaBodyGenerate22KeyframeGenerate22PikaframesPost,
-    PikaBodyGenerate22T2vGenerate22T2vPost,
-    PikaBodyGeneratePikadditionsGeneratePikadditionsPost,
-    PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
-    PikaBodyGeneratePikaswapsGeneratePikaswapsPost,
-    PikaGenerateResponse,
-    PikaVideoResponse,
-)
+from comfy_api_nodes.apis import pika_defs
 from comfy_api_nodes.apis.client import (
     ApiEndpoint,
     EmptyRequest,
@@ -55,116 +42,36 @@ PATH_PIKASCENES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikascenes"
|
||||
PATH_VIDEO_GET = "/proxy/pika/videos"
|
||||
|
||||
|
||||
class PikaDurationEnum(int, Enum):
|
||||
integer_5 = 5
|
||||
integer_10 = 10
|
||||
|
||||
|
||||
class PikaResolutionEnum(str, Enum):
|
||||
field_1080p = "1080p"
|
||||
field_720p = "720p"
|
||||
|
||||
|
||||
class Pikaffect(str, Enum):
|
||||
Cake_ify = "Cake-ify"
|
||||
Crumble = "Crumble"
|
||||
Crush = "Crush"
|
||||
Decapitate = "Decapitate"
|
||||
Deflate = "Deflate"
|
||||
Dissolve = "Dissolve"
|
||||
Explode = "Explode"
|
||||
Eye_pop = "Eye-pop"
|
||||
Inflate = "Inflate"
|
||||
Levitate = "Levitate"
|
||||
Melt = "Melt"
|
||||
Peel = "Peel"
|
||||
Poke = "Poke"
|
||||
Squish = "Squish"
|
||||
Ta_da = "Ta-da"
|
||||
Tear = "Tear"
|
||||
|
||||
|
||||
class PikaApiError(Exception):
|
||||
"""Exception for Pika API errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def is_valid_video_response(response: PikaVideoResponse) -> bool:
|
||||
"""Check if the video response is valid."""
|
||||
return hasattr(response, "url") and response.url is not None
|
||||
|
||||
|
||||
def is_valid_initial_response(response: PikaGenerateResponse) -> bool:
|
||||
"""Check if the initial response is valid."""
|
||||
return hasattr(response, "video_id") and response.video_id is not None
|
||||
|
||||
|
||||
async def poll_for_task_status(
|
||||
task_id: str,
|
||||
async def execute_task(
|
||||
initial_operation: SynchronousOperation[R, pika_defs.PikaGenerateResponse],
|
||||
auth_kwargs: Optional[dict[str, str]] = None,
|
||||
node_id: Optional[str] = None,
|
||||
) -> PikaGenerateResponse:
|
||||
polling_operation = PollingOperation(
|
||||
) -> comfy_io.NodeOutput:
|
||||
task_id = (await initial_operation.execute()).video_id
|
||||
final_response: pika_defs.PikaVideoResponse = await PollingOperation(
|
||||
poll_endpoint=ApiEndpoint(
|
||||
path=f"{PATH_VIDEO_GET}/{task_id}",
|
||||
method=HttpMethod.GET,
|
||||
request_model=EmptyRequest,
|
||||
response_model=PikaVideoResponse,
|
||||
response_model=pika_defs.PikaVideoResponse,
|
||||
),
|
||||
completed_statuses=[
|
||||
"finished",
|
||||
],
|
||||
completed_statuses=["finished"],
|
||||
failed_statuses=["failed", "cancelled"],
|
||||
status_extractor=lambda response: (
|
||||
response.status.value if response.status else None
|
||||
),
|
||||
progress_extractor=lambda response: (
|
||||
response.progress if hasattr(response, "progress") else None
|
||||
),
|
||||
status_extractor=lambda response: (response.status.value if response.status else None),
|
||||
progress_extractor=lambda response: (response.progress if hasattr(response, "progress") else None),
|
||||
auth_kwargs=auth_kwargs,
|
||||
result_url_extractor=lambda response: (
|
||||
response.url if hasattr(response, "url") else None
|
||||
),
|
||||
result_url_extractor=lambda response: (response.url if hasattr(response, "url") else None),
|
||||
node_id=node_id,
|
||||
estimated_duration=60
|
||||
)
|
||||
return await polling_operation.execute()
|
||||
|
||||
|
||||
async def execute_task(
|
||||
initial_operation: SynchronousOperation[R, PikaGenerateResponse],
|
||||
auth_kwargs: Optional[dict[str, str]] = None,
|
||||
node_id: Optional[str] = None,
|
||||
) -> tuple[VideoFromFile]:
|
||||
"""Executes the initial operation then polls for the task status until it is completed.
|
||||
|
||||
Args:
|
||||
initial_operation: The initial operation to execute.
|
||||
auth_kwargs: The authentication token(s) to use for the API call.
|
||||
|
||||
Returns:
|
||||
A tuple containing the video file as a VIDEO output.
|
||||
"""
|
||||
initial_response = await initial_operation.execute()
|
||||
if not is_valid_initial_response(initial_response):
|
||||
error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}"
|
||||
estimated_duration=60,
|
||||
max_poll_attempts=240,
|
||||
).execute()
|
||||
if not final_response.url:
|
||||
error_msg = f"Pika task {task_id} succeeded but no video data found in response:\n{final_response}"
|
||||
logging.error(error_msg)
|
||||
raise PikaApiError(error_msg)
|
||||
|
||||
task_id = initial_response.video_id
|
||||
final_response = await poll_for_task_status(task_id, auth_kwargs, node_id=node_id)
|
||||
if not is_valid_video_response(final_response):
|
||||
error_msg = (
|
||||
f"Pika task {task_id} succeeded but no video data found in response."
|
||||
)
|
||||
logging.error(error_msg)
|
||||
raise PikaApiError(error_msg)
|
||||
|
||||
video_url = str(final_response.url)
|
||||
raise Exception(error_msg)
|
||||
video_url = final_response.url
|
||||
logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url)
|
||||
|
||||
return (await download_url_to_video_output(video_url),)
|
||||
return comfy_io.NodeOutput(await download_url_to_video_output(video_url))
|
||||
|
||||
|
||||
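The rewritten `execute_task` collapses the old two-helper design into a single submit-then-poll flow: the synchronous operation returns a `video_id`, and a `PollingOperation` hits `PATH_VIDEO_GET/{task_id}` until the status is `finished` or a failure state. A minimal generic sketch of that pattern, with hypothetical `submit()` and `get_status()` coroutines standing in for the real operation classes:

```python
import asyncio
from typing import Awaitable, Callable

async def submit_then_poll(
    submit: Callable[[], Awaitable[str]],          # hypothetical: returns a task id
    get_status: Callable[[str], Awaitable[dict]],  # hypothetical: returns {"status": ..., "url": ...}
    interval: float = 5.0,
    max_attempts: int = 240,
) -> str:
    task_id = await submit()
    for _ in range(max_attempts):
        response = await get_status(task_id)
        if response["status"] == "finished":
            # A "finished" task with no result URL is still an error, mirroring the check above.
            if not response.get("url"):
                raise RuntimeError(f"task {task_id} finished without a result URL")
            return response["url"]
        if response["status"] in ("failed", "cancelled"):
            raise RuntimeError(f"task {task_id} ended in status {response['status']}")
        await asyncio.sleep(interval)
    raise TimeoutError(f"task {task_id} did not finish in {max_attempts} attempts")
```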
def get_base_inputs_types() -> list[comfy_io.Input]:
@@ -173,16 +80,12 @@ def get_base_inputs_types() -> list[comfy_io.Input]:
        comfy_io.String.Input("prompt_text", multiline=True),
        comfy_io.String.Input("negative_prompt", multiline=True),
        comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
        comfy_io.Combo.Input(
            "resolution", options=[resolution.value for resolution in PikaResolutionEnum], default="1080p"
        ),
        comfy_io.Combo.Input(
            "duration", options=[duration.value for duration in PikaDurationEnum], default=5
        ),
        comfy_io.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"),
        comfy_io.Combo.Input("duration", options=[5, 10], default=5),
    ]


class PikaImageToVideoV2_2(comfy_io.ComfyNode):
class PikaImageToVideo(comfy_io.ComfyNode):
    """Pika 2.2 Image to Video Node."""

    @classmethod
@@ -215,14 +118,9 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode):
        resolution: str,
        duration: int,
    ) -> comfy_io.NodeOutput:
        # Convert image to BytesIO
        image_bytes_io = tensor_to_bytesio(image)
        image_bytes_io.seek(0)

        pika_files = {"image": ("image.png", image_bytes_io, "image/png")}

        # Prepare non-file data
        pika_request_data = PikaBodyGenerate22I2vGenerate22I2vPost(
        pika_request_data = pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost(
            promptText=prompt_text,
            negativePrompt=negative_prompt,
            seed=seed,
@@ -237,8 +135,8 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_IMAGE_TO_VIDEO,
                method=HttpMethod.POST,
                request_model=PikaBodyGenerate22I2vGenerate22I2vPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=pika_request_data,
            files=pika_files,
@@ -248,7 +146,7 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode):
        return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)


class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode):
class PikaTextToVideoNode(comfy_io.ComfyNode):
    """Pika Text2Video v2.2 Node."""

    @classmethod
@@ -296,10 +194,10 @@ class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_TEXT_TO_VIDEO,
                method=HttpMethod.POST,
                request_model=PikaBodyGenerate22T2vGenerate22T2vPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=PikaBodyGenerate22T2vGenerate22T2vPost(
            request=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost(
                promptText=prompt_text,
                negativePrompt=negative_prompt,
                seed=seed,
@@ -313,7 +211,7 @@ class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode):
        return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)


class PikaScenesV2_2(comfy_io.ComfyNode):
class PikaScenes(comfy_io.ComfyNode):
    """PikaScenes v2.2 Node."""

    @classmethod
@@ -389,7 +287,6 @@ class PikaScenesV2_2(comfy_io.ComfyNode):
        image_ingredient_4: Optional[torch.Tensor] = None,
        image_ingredient_5: Optional[torch.Tensor] = None,
    ) -> comfy_io.NodeOutput:
        # Convert all passed images to BytesIO
        all_image_bytes_io = []
        for image in [
            image_ingredient_1,
@@ -399,16 +296,14 @@ class PikaScenesV2_2(comfy_io.ComfyNode):
            image_ingredient_5,
        ]:
            if image is not None:
                image_bytes_io = tensor_to_bytesio(image)
                image_bytes_io.seek(0)
                all_image_bytes_io.append(image_bytes_io)
                all_image_bytes_io.append(tensor_to_bytesio(image))

        pika_files = [
            ("images", (f"image_{i}.png", image_bytes_io, "image/png"))
            for i, image_bytes_io in enumerate(all_image_bytes_io)
        ]

        pika_request_data = PikaBodyGenerate22C2vGenerate22PikascenesPost(
        pika_request_data = pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost(
            ingredientsMode=ingredients_mode,
            promptText=prompt_text,
            negativePrompt=negative_prompt,
@@ -425,8 +320,8 @@ class PikaScenesV2_2(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_PIKASCENES,
                method=HttpMethod.POST,
                request_model=PikaBodyGenerate22C2vGenerate22PikascenesPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=pika_request_data,
            files=pika_files,
@@ -477,22 +372,16 @@ class PikAdditionsNode(comfy_io.ComfyNode):
        negative_prompt: str,
        seed: int,
    ) -> comfy_io.NodeOutput:
        # Convert video to BytesIO
        video_bytes_io = BytesIO()
        video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264)
        video_bytes_io.seek(0)

        # Convert image to BytesIO
        image_bytes_io = tensor_to_bytesio(image)
        image_bytes_io.seek(0)

        pika_files = {
            "video": ("video.mp4", video_bytes_io, "video/mp4"),
            "image": ("image.png", image_bytes_io, "image/png"),
        }

        # Prepare non-file data
        pika_request_data = PikaBodyGeneratePikadditionsGeneratePikadditionsPost(
        pika_request_data = pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost(
            promptText=prompt_text,
            negativePrompt=negative_prompt,
            seed=seed,
@@ -505,8 +394,8 @@ class PikAdditionsNode(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_PIKADDITIONS,
                method=HttpMethod.POST,
                request_model=PikaBodyGeneratePikadditionsGeneratePikadditionsPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=pika_request_data,
            files=pika_files,
@@ -529,11 +418,25 @@ class PikaSwapsNode(comfy_io.ComfyNode):
            category="api node/video/Pika",
            inputs=[
                comfy_io.Video.Input("video", tooltip="The video to swap an object in."),
                comfy_io.Image.Input("image", tooltip="The image used to replace the masked object in the video."),
                comfy_io.Mask.Input("mask", tooltip="Use the mask to define areas in the video to replace"),
                comfy_io.String.Input("prompt_text", multiline=True),
                comfy_io.String.Input("negative_prompt", multiline=True),
                comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
                comfy_io.Image.Input(
                    "image",
                    tooltip="The image used to replace the masked object in the video.",
                    optional=True,
                ),
                comfy_io.Mask.Input(
                    "mask",
                    tooltip="Use the mask to define areas in the video to replace.",
                    optional=True,
                ),
                comfy_io.String.Input("prompt_text", multiline=True, optional=True),
                comfy_io.String.Input("negative_prompt", multiline=True, optional=True),
                comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True),
                comfy_io.String.Input(
                    "region_to_modify",
                    multiline=True,
                    optional=True,
                    tooltip="Plaintext description of the object / region to modify.",
                ),
            ],
            outputs=[comfy_io.Video.Output()],
            hidden=[
@@ -548,41 +451,29 @@ class PikaSwapsNode(comfy_io.ComfyNode):
    async def execute(
        cls,
        video: VideoInput,
        image: torch.Tensor,
        mask: torch.Tensor,
        prompt_text: str,
        negative_prompt: str,
        seed: int,
        image: Optional[torch.Tensor] = None,
        mask: Optional[torch.Tensor] = None,
        prompt_text: str = "",
        negative_prompt: str = "",
        seed: int = 0,
        region_to_modify: str = "",
    ) -> comfy_io.NodeOutput:
        # Convert video to BytesIO
        video_bytes_io = BytesIO()
        video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264)
        video_bytes_io.seek(0)

        # Convert mask to binary mask with three channels
        mask = torch.round(mask)
        mask = mask.repeat(1, 3, 1, 1)

        # Convert 3-channel binary mask to BytesIO
        mask_bytes_io = BytesIO()
        mask_bytes_io.write(mask.numpy().astype(np.uint8))
        mask_bytes_io.seek(0)

        # Convert image to BytesIO
        image_bytes_io = tensor_to_bytesio(image)
        image_bytes_io.seek(0)

        pika_files = {
            "video": ("video.mp4", video_bytes_io, "video/mp4"),
            "image": ("image.png", image_bytes_io, "image/png"),
            "modifyRegionMask": ("mask.png", mask_bytes_io, "image/png"),
        }
        if mask is not None:
            pika_files["modifyRegionMask"] = ("mask.png", tensor_to_bytesio(mask), "image/png")
        if image is not None:
            pika_files["image"] = ("image.png", tensor_to_bytesio(image), "image/png")

        # Prepare non-file data
        pika_request_data = PikaBodyGeneratePikaswapsGeneratePikaswapsPost(
        pika_request_data = pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost(
            promptText=prompt_text,
            negativePrompt=negative_prompt,
            seed=seed,
            modifyRegionRoi=region_to_modify if region_to_modify else None,
        )
        auth = {
            "auth_token": cls.hidden.auth_token_comfy_org,
@@ -590,10 +481,10 @@ class PikaSwapsNode(comfy_io.ComfyNode):
        }
        initial_operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path=PATH_PIKADDITIONS,
                path=PATH_PIKASWAPS,
                method=HttpMethod.POST,
                request_model=PikaBodyGeneratePikadditionsGeneratePikadditionsPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=pika_request_data,
            files=pika_files,
@@ -616,7 +507,7 @@ class PikaffectsNode(comfy_io.ComfyNode):
            inputs=[
                comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."),
                comfy_io.Combo.Input(
                    "pikaffect", options=[pikaffect.value for pikaffect in Pikaffect], default="Cake-ify"
                    "pikaffect", options=pika_defs.Pikaffect, default="Cake-ify"
                ),
                comfy_io.String.Input("prompt_text", multiline=True),
                comfy_io.String.Input("negative_prompt", multiline=True),
@@ -648,10 +539,10 @@ class PikaffectsNode(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_PIKAFFECTS,
                method=HttpMethod.POST,
                request_model=PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=PikaBodyGeneratePikaffectsGeneratePikaffectsPost(
            request=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost(
                pikaffect=pikaffect,
                promptText=prompt_text,
                negativePrompt=negative_prompt,
@@ -664,7 +555,7 @@ class PikaffectsNode(comfy_io.ComfyNode):
        return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)


class PikaStartEndFrameNode2_2(comfy_io.ComfyNode):
class PikaStartEndFrameNode(comfy_io.ComfyNode):
    """PikaFrames v2.2 Node."""

    @classmethod
@@ -711,10 +602,10 @@ class PikaStartEndFrameNode2_2(comfy_io.ComfyNode):
            endpoint=ApiEndpoint(
                path=PATH_PIKAFRAMES,
                method=HttpMethod.POST,
                request_model=PikaBodyGenerate22KeyframeGenerate22PikaframesPost,
                response_model=PikaGenerateResponse,
                request_model=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost,
                response_model=pika_defs.PikaGenerateResponse,
            ),
            request=PikaBodyGenerate22KeyframeGenerate22PikaframesPost(
            request=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost(
                promptText=prompt_text,
                negativePrompt=negative_prompt,
                seed=seed,
@@ -732,13 +623,13 @@ class PikaApiNodesExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
        return [
            PikaImageToVideoV2_2,
            PikaTextToVideoNodeV2_2,
            PikaScenesV2_2,
            PikaImageToVideo,
            PikaTextToVideoNode,
            PikaScenes,
            PikAdditionsNode,
            PikaSwapsNode,
            PikaffectsNode,
            PikaStartEndFrameNode2_2,
            PikaStartEndFrameNode,
        ]

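In the reworked `PikaSwapsNode`, `image` and `mask` become optional, so the multipart payload is built conditionally: the video part is always attached, while the `image` and `modifyRegionMask` parts are added only when the corresponding tensors are provided. The same pattern in isolation, with plain `BytesIO` buffers standing in for the encoded tensors:

```python
from io import BytesIO
from typing import Optional

def build_swap_files(
    video_bytes: BytesIO,
    image_png: Optional[BytesIO] = None,
    mask_png: Optional[BytesIO] = None,
) -> dict[str, tuple[str, BytesIO, str]]:
    # The video part is mandatory; optional parts are attached only when present,
    # so the API never receives an empty file field.
    files = {"video": ("video.mp4", video_bytes, "video/mp4")}
    if mask_png is not None:
        files["modifyRegionMask"] = ("mask.png", mask_png, "image/png")
    if image_png is not None:
        files["image"] = ("image.png", image_png, "image/png")
    return files
```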
@@ -85,7 +85,7 @@ class PixverseTemplateNode(comfy_io.ComfyNode):
            display_name="PixVerse Template",
            category="api node/video/PixVerse",
            inputs=[
                comfy_io.Combo.Input("template", options=[list(pixverse_templates.keys())]),
                comfy_io.Combo.Input("template", options=list(pixverse_templates.keys())),
            ],
            outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")],
        )
@@ -120,20 +120,20 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "aspect_ratio",
                    options=[ratio.value for ratio in PixverseAspectRatio],
                    options=PixverseAspectRatio,
                ),
                comfy_io.Combo.Input(
                    "quality",
                    options=[resolution.value for resolution in PixverseQuality],
                    options=PixverseQuality,
                    default=PixverseQuality.res_540p,
                ),
                comfy_io.Combo.Input(
                    "duration_seconds",
                    options=[dur.value for dur in PixverseDuration],
                    options=PixverseDuration,
                ),
                comfy_io.Combo.Input(
                    "motion_mode",
                    options=[mode.value for mode in PixverseMotionMode],
                    options=PixverseMotionMode,
                ),
                comfy_io.Int.Input(
                    "seed",
@@ -262,16 +262,16 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "quality",
                    options=[resolution.value for resolution in PixverseQuality],
                    options=PixverseQuality,
                    default=PixverseQuality.res_540p,
                ),
                comfy_io.Combo.Input(
                    "duration_seconds",
                    options=[dur.value for dur in PixverseDuration],
                    options=PixverseDuration,
                ),
                comfy_io.Combo.Input(
                    "motion_mode",
                    options=[mode.value for mode in PixverseMotionMode],
                    options=PixverseMotionMode,
                ),
                comfy_io.Int.Input(
                    "seed",
@@ -403,16 +403,16 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "quality",
                    options=[resolution.value for resolution in PixverseQuality],
                    options=PixverseQuality,
                    default=PixverseQuality.res_540p,
                ),
                comfy_io.Combo.Input(
                    "duration_seconds",
                    options=[dur.value for dur in PixverseDuration],
                    options=PixverseDuration,
                ),
                comfy_io.Combo.Input(
                    "motion_mode",
                    options=[mode.value for mode in PixverseMotionMode],
                    options=PixverseMotionMode,
                ),
                comfy_io.Int.Input(
                    "seed",

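Across the PixVerse, Runway, Stability, and Vidu hunks, combo options move from `[x.value for x in SomeEnum]` to passing the Enum class itself, and enum-typed defaults drop the trailing `.value`. The call sites imply the input layer expands an Enum class into its member values; a sketch of that normalization under that assumption, with an illustrative enum (member names here are not the real PixVerse ones):

```python
from enum import Enum
from typing import Iterable, Union

class Quality(str, Enum):
    res_540p = "540p"
    res_720p = "720p"
    res_1080p = "1080p"

def normalize_combo_options(options: Union[type[Enum], Iterable]) -> list:
    # An Enum class is expanded to its member values, so options=Quality
    # behaves like options=[x.value for x in Quality].
    if isinstance(options, type) and issubclass(options, Enum):
        return [member.value for member in options]
    return list(options)

assert normalize_combo_options(Quality) == ["540p", "720p", "1080p"]
```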
@@ -172,16 +172,16 @@ async def create_generate_task(
    logging.info("[ Rodin3D API - Submit Jobs ] Submit Generate Task Success!")
    subscription_key = response.jobs.subscription_key
    task_uuid = response.uuid
    logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}")
    logging.info("[ Rodin3D API - Submit Jobs ] UUID: %s", task_uuid)
    return task_uuid, subscription_key


def check_rodin_status(response: Rodin3DCheckStatusResponse) -> str:
    all_done = all(job.status == JobStatus.Done for job in response.jobs)
    status_list = [str(job.status) for job in response.jobs]
    logging.info(f"[ Rodin3D API - CheckStatus ] Generate Status: {status_list}")
    logging.info("[ Rodin3D API - CheckStatus ] Generate Status: %s", status_list)
    if any(job.status == JobStatus.Failed for job in response.jobs):
        logging.error(f"[ Rodin3D API - CheckStatus ] Generate Failed: {status_list}, Please try again.")
        logging.error("[ Rodin3D API - CheckStatus ] Generate Failed: %s, Please try again.", status_list)
        raise Exception("[ Rodin3D API ] Generate Failed, Please Try again.")
    if all_done:
        return "DONE"
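The Rodin3D hunks replace f-string log calls with %-style lazy formatting, which defers string interpolation until the record is actually emitted; with an f-string the message is built even when the log level filters it out. A minimal illustration of the difference:

```python
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("rodin3d")

task_uuid = "1234-abcd"

# Eager: the f-string is formatted even though INFO is filtered out here.
log.info(f"UUID: {task_uuid}")

# Lazy: arguments are stored on the record and only formatted if emitted.
log.info("UUID: %s", task_uuid)
```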
@@ -235,7 +235,7 @@ async def download_files(url_list, task_uuid):
            file_path = os.path.join(save_path, file_name)
            if file_path.endswith(".glb"):
                model_file_path = file_path
            logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}")
            logging.info("[ Rodin3D API - download_files ] Downloading file: %s", file_path)
            max_retries = 5
            for attempt in range(max_retries):
                try:
@@ -246,7 +246,7 @@ async def download_files(url_list, task_uuid):
                            f.write(chunk)
                    break
                except Exception as e:
                    logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}")
                    logging.info("[ Rodin3D API - download_files ] Error downloading %s:%s", file_path, str(e))
                    if attempt < max_retries - 1:
                        logging.info("Retrying...")
                        await asyncio.sleep(2)

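`download_files` wraps each download in a bounded retry loop: up to `max_retries` attempts, sleeping two seconds between failures and simply giving up after the last attempt. The same control flow in isolation, with a hypothetical `fetch` coroutine standing in for the chunked HTTP download:

```python
import asyncio

async def fetch_with_retries(fetch, url: str, max_retries: int = 5, delay: float = 2.0):
    # Mirrors the loop in the hunk: try, break on success, sleep and retry on error.
    for attempt in range(max_retries):
        try:
            return await fetch(url)  # fetch is a hypothetical stand-in
        except Exception as e:
            print(f"Error downloading {url}: {e}")
            if attempt < max_retries - 1:
                print("Retrying...")
                await asyncio.sleep(delay)
    return None  # all attempts failed; the original loop also falls through silently
```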
@@ -200,11 +200,11 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "duration",
                    options=[model.value for model in Duration],
                    options=Duration,
                ),
                comfy_io.Combo.Input(
                    "ratio",
                    options=[model.value for model in RunwayGen3aAspectRatio],
                    options=RunwayGen3aAspectRatio,
                ),
                comfy_io.Int.Input(
                    "seed",
@@ -300,11 +300,11 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "duration",
                    options=[model.value for model in Duration],
                    options=Duration,
                ),
                comfy_io.Combo.Input(
                    "ratio",
                    options=[model.value for model in RunwayGen4TurboAspectRatio],
                    options=RunwayGen4TurboAspectRatio,
                ),
                comfy_io.Int.Input(
                    "seed",
@@ -408,11 +408,11 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "duration",
                    options=[model.value for model in Duration],
                    options=Duration,
                ),
                comfy_io.Combo.Input(
                    "ratio",
                    options=[model.value for model in RunwayGen3aAspectRatio],
                    options=RunwayGen3aAspectRatio,
                ),
                comfy_io.Int.Input(
                    "seed",

@@ -82,8 +82,8 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "aspect_ratio",
                    options=[x.value for x in StabilityAspectRatio],
                    default=StabilityAspectRatio.ratio_1_1.value,
                    options=StabilityAspectRatio,
                    default=StabilityAspectRatio.ratio_1_1,
                    tooltip="Aspect ratio of generated image.",
                ),
                comfy_io.Combo.Input(
@@ -217,12 +217,12 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "model",
                    options=[x.value for x in Stability_SD3_5_Model],
                    options=Stability_SD3_5_Model,
                ),
                comfy_io.Combo.Input(
                    "aspect_ratio",
                    options=[x.value for x in StabilityAspectRatio],
                    default=StabilityAspectRatio.ratio_1_1.value,
                    options=StabilityAspectRatio,
                    default=StabilityAspectRatio.ratio_1_1,
                    tooltip="Aspect ratio of generated image.",
                ),
                comfy_io.Combo.Input(

@@ -215,7 +215,7 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode):
        initial_response = await initial_operation.execute()
        operation_name = initial_response.name

        logging.info(f"Veo generation started with operation name: {operation_name}")
        logging.info("Veo generation started with operation name: %s", operation_name)

        # Define status extractor function
        def status_extractor(response):

@@ -173,8 +173,8 @@ class ViduTextToVideoNode(comfy_io.ComfyNode):
            inputs=[
                comfy_io.Combo.Input(
                    "model",
                    options=[model.value for model in VideoModelName],
                    default=VideoModelName.vidu_q1.value,
                    options=VideoModelName,
                    default=VideoModelName.vidu_q1,
                    tooltip="Model name",
                ),
                comfy_io.String.Input(
@@ -205,22 +205,22 @@ class ViduTextToVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "aspect_ratio",
                    options=[model.value for model in AspectRatio],
                    default=AspectRatio.r_16_9.value,
                    options=AspectRatio,
                    default=AspectRatio.r_16_9,
                    tooltip="The aspect ratio of the output video",
                    optional=True,
                ),
                comfy_io.Combo.Input(
                    "resolution",
                    options=[model.value for model in Resolution],
                    default=Resolution.r_1080p.value,
                    options=Resolution,
                    default=Resolution.r_1080p,
                    tooltip="Supported values may vary by model & duration",
                    optional=True,
                ),
                comfy_io.Combo.Input(
                    "movement_amplitude",
                    options=[model.value for model in MovementAmplitude],
                    default=MovementAmplitude.auto.value,
                    options=MovementAmplitude,
                    default=MovementAmplitude.auto,
                    tooltip="The movement amplitude of objects in the frame",
                    optional=True,
                ),
@@ -278,8 +278,8 @@ class ViduImageToVideoNode(comfy_io.ComfyNode):
            inputs=[
                comfy_io.Combo.Input(
                    "model",
                    options=[model.value for model in VideoModelName],
                    default=VideoModelName.vidu_q1.value,
                    options=VideoModelName,
                    default=VideoModelName.vidu_q1,
                    tooltip="Model name",
                ),
                comfy_io.Image.Input(
@@ -316,14 +316,14 @@ class ViduImageToVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "resolution",
                    options=[model.value for model in Resolution],
                    default=Resolution.r_1080p.value,
                    options=Resolution,
                    default=Resolution.r_1080p,
                    tooltip="Supported values may vary by model & duration",
                    optional=True,
                ),
                comfy_io.Combo.Input(
                    "movement_amplitude",
                    options=[model.value for model in MovementAmplitude],
                    options=MovementAmplitude,
                    default=MovementAmplitude.auto.value,
                    tooltip="The movement amplitude of objects in the frame",
                    optional=True,
@@ -388,8 +388,8 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode):
            inputs=[
                comfy_io.Combo.Input(
                    "model",
                    options=[model.value for model in VideoModelName],
                    default=VideoModelName.vidu_q1.value,
                    options=VideoModelName,
                    default=VideoModelName.vidu_q1,
                    tooltip="Model name",
                ),
                comfy_io.Image.Input(
@@ -424,8 +424,8 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode):
                ),
                comfy_io.Combo.Input(
                    "aspect_ratio",
                    options=[model.value for model in AspectRatio],
                    default=AspectRatio.r_16_9.value,
                    options=AspectRatio,
                    default=AspectRatio.r_16_9,
                    tooltip="The aspect ratio of the output video",
                    optional=True,
                ),

@@ -1,6 +1,9 @@
import torch
import comfy.utils
from enum import Enum
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


def resize_mask(mask, shape):
    return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
@@ -101,24 +104,28 @@ def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_
    return out_image, out_alpha


class PorterDuffImageComposite:
class PorterDuffImageComposite(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "source": ("IMAGE",),
                "source_alpha": ("MASK",),
                "destination": ("IMAGE",),
                "destination_alpha": ("MASK",),
                "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}),
            },
        }
    def define_schema(cls):
        return io.Schema(
            node_id="PorterDuffImageComposite",
            display_name="Porter-Duff Image Composite",
            category="mask/compositing",
            inputs=[
                io.Image.Input("source"),
                io.Mask.Input("source_alpha"),
                io.Image.Input("destination"),
                io.Mask.Input("destination_alpha"),
                io.Combo.Input("mode", options=[mode.name for mode in PorterDuffMode], default=PorterDuffMode.DST.name),
            ],
            outputs=[
                io.Image.Output(),
                io.Mask.Output(),
            ],
        )

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "composite"
    CATEGORY = "mask/compositing"

    def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode):
    @classmethod
    def execute(cls, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode) -> io.NodeOutput:
        batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha))
        out_images = []
        out_alphas = []
@@ -150,45 +157,48 @@ class PorterDuffImageComposite:
            out_images.append(out_image)
            out_alphas.append(out_alpha.squeeze(2))

        result = (torch.stack(out_images), torch.stack(out_alphas))
        return result
        return io.NodeOutput(torch.stack(out_images), torch.stack(out_alphas))


class SplitImageWithAlpha:
class SplitImageWithAlpha(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
            }
        }
    def define_schema(cls):
        return io.Schema(
            node_id="SplitImageWithAlpha",
            display_name="Split Image with Alpha",
            category="mask/compositing",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
                io.Mask.Output(),
            ],
        )

    CATEGORY = "mask/compositing"
    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "split_image_with_alpha"

    def split_image_with_alpha(self, image: torch.Tensor):
    @classmethod
    def execute(cls, image: torch.Tensor) -> io.NodeOutput:
        out_images = [i[:,:,:3] for i in image]
        out_alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image]
        result = (torch.stack(out_images), 1.0 - torch.stack(out_alphas))
        return result
        return io.NodeOutput(torch.stack(out_images), 1.0 - torch.stack(out_alphas))


class JoinImageWithAlpha:
class JoinImageWithAlpha(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "alpha": ("MASK",),
            }
        }
    def define_schema(cls):
        return io.Schema(
            node_id="JoinImageWithAlpha",
            display_name="Join Image with Alpha",
            category="mask/compositing",
            inputs=[
                io.Image.Input("image"),
                io.Mask.Input("alpha"),
            ],
            outputs=[io.Image.Output()],
        )

    CATEGORY = "mask/compositing"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "join_image_with_alpha"

    def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor):
    @classmethod
    def execute(cls, image: torch.Tensor, alpha: torch.Tensor) -> io.NodeOutput:
        batch_size = min(len(image), len(alpha))
        out_images = []

@@ -196,19 +206,18 @@ class JoinImageWithAlpha:
        for i in range(batch_size):
            out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2))

        result = (torch.stack(out_images),)
        return result
        return io.NodeOutput(torch.stack(out_images))


NODE_CLASS_MAPPINGS = {
    "PorterDuffImageComposite": PorterDuffImageComposite,
    "SplitImageWithAlpha": SplitImageWithAlpha,
    "JoinImageWithAlpha": JoinImageWithAlpha,
}
class CompositingExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            PorterDuffImageComposite,
            SplitImageWithAlpha,
            JoinImageWithAlpha,
        ]


NODE_DISPLAY_NAME_MAPPINGS = {
    "PorterDuffImageComposite": "Porter-Duff Image Composite",
    "SplitImageWithAlpha": "Split Image with Alpha",
    "JoinImageWithAlpha": "Join Image with Alpha",
}
async def comfy_entrypoint() -> CompositingExtension:
    return CompositingExtension()

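The compositing nodes above show the recurring migration in this changeset: legacy `INPUT_TYPES`/`RETURN_TYPES`/`FUNCTION` class attributes are replaced by a `define_schema` classmethod returning an `io.Schema`, and the named instance method becomes an `execute` classmethod returning `io.NodeOutput`. Reduced to its skeleton (names mirror the diff; the `io` import requires the ComfyUI runtime):

```python
from comfy_api.latest import io  # available inside a ComfyUI environment

# Before: implicit schema via class attributes and a named instance method.
class LegacyNode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "run"
    CATEGORY = "example"

    def run(self, image):
        return (image,)

# After: explicit schema object plus a stateless execute classmethod.
class V3Node(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3Node",
            category="example",
            inputs=[io.Image.Input("image")],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        return io.NodeOutput(image)
```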
@@ -1,60 +1,80 @@
import node_helpers
import comfy.utils
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class CLIPTextEncodeFlux:

class CLIPTextEncodeFlux(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
        }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeFlux",
            category="advanced/conditioning/flux",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
                io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    CATEGORY = "advanced/conditioning/flux"

    def encode(self, clip, clip_l, t5xxl, guidance):
    @classmethod
    def execute(cls, clip, clip_l, t5xxl, guidance) -> io.NodeOutput:
        tokens = clip.tokenize(clip_l)
        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]

        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}), )
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))

class FluxGuidance:
    encode = execute # TODO: remove


class FluxGuidance(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning": ("CONDITIONING", ),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
        }}
    def define_schema(cls):
        return io.Schema(
            node_id="FluxGuidance",
            category="advanced/conditioning/flux",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"

    def append(self, conditioning, guidance):
    @classmethod
    def execute(cls, conditioning, guidance) -> io.NodeOutput:
        c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
        return (c, )
        return io.NodeOutput(c)

    append = execute # TODO: remove


class FluxDisableGuidance:
class FluxDisableGuidance(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning": ("CONDITIONING", ),
        }}
    def define_schema(cls):
        return io.Schema(
            node_id="FluxDisableGuidance",
            category="advanced/conditioning/flux",
            description="This node completely disables the guidance embed on Flux and Flux like models",
            inputs=[
                io.Conditioning.Input("conditioning"),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models"

    def append(self, conditioning):
    @classmethod
    def execute(cls, conditioning) -> io.NodeOutput:
        c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
        return (c, )
        return io.NodeOutput(c)

    append = execute # TODO: remove


PREFERED_KONTEXT_RESOLUTIONS = [
@@ -78,52 +98,73 @@ PREFERED_KONTEXT_RESOLUTIONS = [
]


class FluxKontextImageScale:
class FluxKontextImageScale(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE", ),
                             },
                }
    def define_schema(cls):
        return io.Schema(
            node_id="FluxKontextImageScale",
            category="advanced/conditioning/flux",
            description="This node resizes the image to one that is more optimal for flux kontext.",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "scale"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext."

    def scale(self, image):
    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        width = image.shape[2]
        height = image.shape[1]
        aspect_ratio = width / height
        _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
        image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1)
        return (image, )
        return io.NodeOutput(image)

    scale = execute # TODO: remove


class FluxKontextMultiReferenceLatentMethod:
class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning": ("CONDITIONING", ),
            "reference_latents_method": (("offset", "index", "uxo/uno"), ),
        }}
    def define_schema(cls):
        return io.Schema(
            node_id="FluxKontextMultiReferenceLatentMethod",
            category="advanced/conditioning/flux",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Combo.Input(
                    "reference_latents_method",
                    options=["offset", "index", "uxo/uno"],
                ),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
            is_experimental=True,
        )

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"
    EXPERIMENTAL = True

    CATEGORY = "advanced/conditioning/flux"

    def append(self, conditioning, reference_latents_method):
    @classmethod
    def execute(cls, conditioning, reference_latents_method) -> io.NodeOutput:
        if "uxo" in reference_latents_method or "uso" in reference_latents_method:
            reference_latents_method = "uxo"
        c = node_helpers.conditioning_set_values(conditioning, {"reference_latents_method": reference_latents_method})
        return (c, )
        return io.NodeOutput(c)

NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
    "FluxGuidance": FluxGuidance,
    "FluxDisableGuidance": FluxDisableGuidance,
    "FluxKontextImageScale": FluxKontextImageScale,
    "FluxKontextMultiReferenceLatentMethod": FluxKontextMultiReferenceLatentMethod,
}
    append = execute # TODO: remove


class FluxExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            CLIPTextEncodeFlux,
            FluxGuidance,
            FluxDisableGuidance,
            FluxKontextImageScale,
            FluxKontextMultiReferenceLatentMethod,
        ]


async def comfy_entrypoint() -> FluxExtension:
    return FluxExtension()

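`FluxKontextImageScale.execute` picks the preferred resolution whose aspect ratio is closest to the input's by minimizing `abs(aspect_ratio - w / h)` over the candidate list, then Lanczos-resizes to that size. The selection step in isolation, with a small illustrative candidate subset (the real `PREFERED_KONTEXT_RESOLUTIONS` is much longer):

```python
# Illustrative subset; the real list in the diff has many more entries.
PREFERED_KONTEXT_RESOLUTIONS = [(672, 1568), (1024, 1024), (1568, 672)]

def closest_resolution(width: int, height: int) -> tuple[int, int]:
    aspect_ratio = width / height
    # min() compares tuples lexicographically, so the smallest aspect-ratio
    # error wins and (w, h) rides along as the payload.
    _, w, h = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
    return w, h

assert closest_resolution(1920, 1080) == (1568, 672)
```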
@@ -2,6 +2,8 @@ import comfy.utils
import comfy_extras.nodes_post_processing
import torch
import nodes
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


def reshape_latent_to(target_shape, latent, repeat_batch=True):
@@ -13,17 +15,23 @@ def reshape_latent_to(target_shape, latent, repeat_batch=True):
    return latent


class LatentAdd:
class LatentAdd(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentAdd",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2):
    @classmethod
    def execute(cls, samples1, samples2) -> io.NodeOutput:
        samples_out = samples1.copy()

        s1 = samples1["samples"]
@@ -31,19 +39,25 @@ class LatentAdd:

        s2 = reshape_latent_to(s1.shape, s2)
        samples_out["samples"] = s1 + s2
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentSubtract:
class LatentSubtract(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentSubtract",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2):
    @classmethod
    def execute(cls, samples1, samples2) -> io.NodeOutput:
        samples_out = samples1.copy()

        s1 = samples1["samples"]
@@ -51,41 +65,49 @@ class LatentSubtract:

        s2 = reshape_latent_to(s1.shape, s2)
        samples_out["samples"] = s1 - s2
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentMultiply:
class LatentMultiply(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentMultiply",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples"),
                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, multiplier):
    @classmethod
    def execute(cls, samples, multiplier) -> io.NodeOutput:
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = s1 * multiplier
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentInterpolate:
class LatentInterpolate(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",),
                              "samples2": ("LATENT",),
                              "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentInterpolate",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2, ratio):
    @classmethod
    def execute(cls, samples1, samples2, ratio) -> io.NodeOutput:
        samples_out = samples1.copy()

        s1 = samples1["samples"]
@@ -104,19 +126,26 @@ class LatentInterpolate:
        st = torch.nan_to_num(t / mt)

        samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
        return (samples_out,)
        return io.NodeOutput(samples_out)

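`LatentInterpolate` does a norm-preserving blend rather than a plain lerp: both latents are normalized, their directions are mixed by `ratio`, and the result is rescaled so its magnitude is the linear interpolation of the two input magnitudes (the hunk's `st = t / mt` renormalizes the blended vector). A self-contained sketch of that idea, reconstructed around the fragments visible in the hunk rather than copied from the full source:

```python
import torch

def interpolate_latents(s1: torch.Tensor, s2: torch.Tensor, ratio: float) -> torch.Tensor:
    # Channel-wise magnitudes, kept broadcastable.
    m1 = torch.linalg.vector_norm(s1, dim=1, keepdim=True)
    m2 = torch.linalg.vector_norm(s2, dim=1, keepdim=True)
    # Blend unit directions, then renormalize the blend (st = t / mt).
    t = torch.nan_to_num(s1 / m1) * ratio + torch.nan_to_num(s2 / m2) * (1.0 - ratio)
    mt = torch.linalg.vector_norm(t, dim=1, keepdim=True)
    st = torch.nan_to_num(t / mt)
    # Scale the unit blend by the linearly interpolated magnitude.
    return st * (m1 * ratio + m2 * (1.0 - ratio))
```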
class LatentConcat:
class LatentConcat(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",), "dim": (["x", "-x", "y", "-y", "t", "-t"], )}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentConcat",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
                io.Combo.Input("dim", options=["x", "-x", "y", "-y", "t", "-t"]),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2, dim):
    @classmethod
    def execute(cls, samples1, samples2, dim) -> io.NodeOutput:
        samples_out = samples1.copy()

        s1 = samples1["samples"]
@@ -136,22 +165,27 @@ class LatentConcat:
            dim = -3

        samples_out["samples"] = torch.cat(c, dim=dim)
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentCut:
class LatentCut(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "dim": (["x", "y", "t"], ),
                             "index": ("INT", {"default": 0, "min": -nodes.MAX_RESOLUTION, "max": nodes.MAX_RESOLUTION, "step": 1}),
                             "amount": ("INT", {"default": 1, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 1})}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentCut",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples"),
                io.Combo.Input("dim", options=["x", "y", "t"]),
                io.Int.Input("index", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1),
                io.Int.Input("amount", default=1, min=1, max=nodes.MAX_RESOLUTION, step=1),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, dim, index, amount):
    @classmethod
    def execute(cls, samples, dim, index, amount) -> io.NodeOutput:
        samples_out = samples.copy()

        s1 = samples["samples"]
@@ -171,19 +205,25 @@ class LatentCut:
            amount = min(-index, amount)

        samples_out["samples"] = torch.narrow(s1, dim, index, amount)
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentBatch:
class LatentBatch(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentBatch",
            category="latent/batch",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "batch"

    CATEGORY = "latent/batch"

    def batch(self, samples1, samples2):
    @classmethod
    def execute(cls, samples1, samples2) -> io.NodeOutput:
        samples_out = samples1.copy()
        s1 = samples1["samples"]
        s2 = samples2["samples"]
@@ -192,20 +232,25 @@ class LatentBatch:
        s = torch.cat((s1, s2), dim=0)
        samples_out["samples"] = s
        samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentBatchSeedBehavior:
class LatentBatchSeedBehavior(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "seed_behavior": (["random", "fixed"],{"default": "fixed"}),}}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentBatchSeedBehavior",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples"),
                io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, seed_behavior):
    @classmethod
    def execute(cls, samples, seed_behavior) -> io.NodeOutput:
        samples_out = samples.copy()
        latent = samples["samples"]
        if seed_behavior == "random":
@@ -215,41 +260,50 @@ class LatentBatchSeedBehavior:
            batch_number = samples_out.get("batch_index", [0])[0]
            samples_out["batch_index"] = [batch_number] * latent.shape[0]

        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentApplyOperation:
class LatentApplyOperation(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "operation": ("LATENT_OPERATION",),
                              }}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentApplyOperation",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Latent.Input("samples"),
                io.LatentOperation.Input("operation"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, samples, operation):
    @classmethod
    def execute(cls, samples, operation) -> io.NodeOutput:
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = operation(latent=s1)
        return (samples_out,)
        return io.NodeOutput(samples_out)

class LatentApplyOperationCFG:
class LatentApplyOperationCFG(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "operation": ("LATENT_OPERATION",),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    def define_schema(cls):
        return io.Schema(
            node_id="LatentApplyOperationCFG",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.LatentOperation.Input("operation"),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def patch(self, model, operation):
    @classmethod
    def execute(cls, model, operation) -> io.NodeOutput:
        m = model.clone()

        def pre_cfg_function(args):
@@ -261,21 +315,25 @@ class LatentApplyOperationCFG:
            return conds_out

        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return (m, )
        return io.NodeOutput(m)

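A `LATENT_OPERATION` here is just a callable over a latent tensor: operation nodes like the tonemap and sharpen below return a closure from `execute`, and `LatentApplyOperation` invokes it as `operation(latent=s1)`. A minimal standalone illustration of that contract:

```python
import torch

def make_scale_operation(multiplier: float):
    # Operation nodes return a closure; extra kwargs are accepted and ignored
    # so callers can pass additional context without breaking the contract.
    def scale(latent: torch.Tensor, **kwargs) -> torch.Tensor:
        return latent * multiplier
    return scale

op = make_scale_operation(0.5)
latent = torch.randn(1, 4, 8, 8)
assert torch.allclose(op(latent=latent), latent * 0.5)
```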
class LatentOperationTonemapReinhard:
class LatentOperationTonemapReinhard(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
                              }}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentOperationTonemapReinhard",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.LatentOperation.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, multiplier):
    @classmethod
    def execute(cls, multiplier) -> io.NodeOutput:
        def tonemap_reinhard(latent, **kwargs):
            latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:,None]
            normalized_latent = latent / latent_vector_magnitude
@@ -291,39 +349,27 @@ class LatentOperationTonemapReinhard:
            new_magnitude *= top

            return normalized_latent * new_magnitude
        return (tonemap_reinhard,)
        return io.NodeOutput(tonemap_reinhard)

class LatentOperationSharpen:
class LatentOperationSharpen(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "sharpen_radius": ("INT", {
                "default": 9,
                "min": 1,
                "max": 31,
                "step": 1
            }),
            "sigma": ("FLOAT", {
                "default": 1.0,
                "min": 0.1,
                "max": 10.0,
                "step": 0.1
            }),
            "alpha": ("FLOAT", {
                "default": 0.1,
                "min": 0.0,
                "max": 5.0,
                "step": 0.01
            }),
        }}
    def define_schema(cls):
        return io.Schema(
            node_id="LatentOperationSharpen",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
                io.Float.Input("alpha", default=0.1, min=0.0, max=5.0, step=0.01),
            ],
            outputs=[
                io.LatentOperation.Output(),
            ],
        )

    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, sharpen_radius, sigma, alpha):
    @classmethod
    def execute(cls, sharpen_radius, sigma, alpha) -> io.NodeOutput:
        def sharpen(latent, **kwargs):
            luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:,None]
            normalized_latent = latent / luminance
@@ -340,19 +386,27 @@ class LatentOperationSharpen:
            sharpened = torch.nn.functional.conv2d(padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels)[:,:,sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]

            return luminance * sharpened
        return (sharpen,)
        return io.NodeOutput(sharpen)

NODE_CLASS_MAPPINGS = {
    "LatentAdd": LatentAdd,
    "LatentSubtract": LatentSubtract,
    "LatentMultiply": LatentMultiply,
    "LatentInterpolate": LatentInterpolate,
    "LatentConcat": LatentConcat,
    "LatentCut": LatentCut,
    "LatentBatch": LatentBatch,
    "LatentBatchSeedBehavior": LatentBatchSeedBehavior,
    "LatentApplyOperation": LatentApplyOperation,
    "LatentApplyOperationCFG": LatentApplyOperationCFG,
    "LatentOperationTonemapReinhard": LatentOperationTonemapReinhard,
    "LatentOperationSharpen": LatentOperationSharpen,
}

class LatentExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            LatentAdd,
            LatentSubtract,
            LatentMultiply,
            LatentInterpolate,
            LatentConcat,
            LatentCut,
            LatentBatch,
            LatentBatchSeedBehavior,
            LatentApplyOperation,
            LatentApplyOperationCFG,
            LatentOperationTonemapReinhard,
            LatentOperationSharpen,
        ]


async def comfy_entrypoint() -> LatentExtension:
    return LatentExtension()

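Each converted file replaces its `NODE_CLASS_MAPPINGS`/`NODE_DISPLAY_NAME_MAPPINGS` dicts with a `ComfyExtension` subclass plus an async `comfy_entrypoint`, which is how the loader discovers v3 nodes. The recurring registration skeleton, using the same imports these files use (runnable only inside a ComfyUI environment):

```python
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class ExampleNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(node_id="ExampleNode", category="example", inputs=[], outputs=[])

    @classmethod
    def execute(cls) -> io.NodeOutput:
        return io.NodeOutput()

class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # Return the node classes this module contributes.
        return [ExampleNode]

async def comfy_entrypoint() -> ExampleExtension:
    # The loader awaits this coroutine to obtain the extension instance.
    return ExampleExtension()
```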
@@ -5,6 +5,8 @@ import folder_paths
import os
import logging
from enum import Enum
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

CLAMP_QUANTILE = 0.99

@@ -71,32 +73,40 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
        output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu()
    return output_sd

class LoraSave:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
class LoraSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoraSave",
            display_name="Extract and Save Lora",
            category="_for_testing",
            inputs=[
                io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
                io.Int.Input("rank", default=8, min=1, max=4096, step=1),
                io.Combo.Input("lora_type", options=tuple(LORA_TYPES.keys())),
                io.Boolean.Input("bias_diff", default=True),
                io.Model.Input(
                    "model_diff",
                    tooltip="The ModelSubtract output to be converted to a lora.",
                    optional=True,
                ),
                io.Clip.Input(
                    "text_encoder_diff",
                    tooltip="The CLIPSubtract output to be converted to a lora.",
                    optional=True,
                ),
            ],
            is_experimental=True,
            is_output_node=True,
        )

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}),
                             "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1}),
                             "lora_type": (tuple(LORA_TYPES.keys()),),
                             "bias_diff": ("BOOLEAN", {"default": True}),
                             },
                "optional": {"model_diff": ("MODEL", {"tooltip": "The ModelSubtract output to be converted to a lora."}),
                             "text_encoder_diff": ("CLIP", {"tooltip": "The CLIPSubtract output to be converted to a lora."})},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"
    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None):
    def execute(cls, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None) -> io.NodeOutput:
        if model_diff is None and text_encoder_diff is None:
            return {}
            return io.NodeOutput()

        lora_type = LORA_TYPES.get(lora_type)
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory())

        output_sd = {}
        if model_diff is not None:
@@ -108,12 +118,16 @@ class LoraSave:
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

        comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None)
        return {}
        return io.NodeOutput()

NODE_CLASS_MAPPINGS = {
    "LoraSave": LoraSave
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LoraSave": "Extract and Save Lora"
}
class LoraSaveExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            LoraSave,
        ]


async def comfy_entrypoint() -> LoraSaveExtension:
    return LoraSaveExtension()

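[Editor's note] Each converted node swaps the nested INPUT_TYPES dicts for typed io.Schema inputs carrying the same constraints. A side-by-side sketch of the "rank" input from the LoraSave hunk above, in both styles (excerpted shape, not a full module; `io` is the comfy_api.latest module from the diff):

    # Old v1 declaration: type tag plus an options dict.
    "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1})

    # v3 declaration: one typed Input object with the same bounds.
    io.Int.Input("rank", default=8, min=1, max=4096, step=1)
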
@@ -1,24 +1,33 @@
from typing_extensions import override
import comfy.utils
from comfy_api.latest import ComfyExtension, io

class PatchModelAddDownscale:
    upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]

class PatchModelAddDownscale(io.ComfyNode):
    UPSCALE_METHODS = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}),
                              "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}),
                              "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                              "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}),
                              "downscale_after_skip": ("BOOLEAN", {"default": True}),
                              "downscale_method": (s.upscale_methods,),
                              "upscale_method": (s.upscale_methods,),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    def define_schema(cls):
        return io.Schema(
            node_id="PatchModelAddDownscale",
            display_name="PatchModelAddDownscale (Kohya Deep Shrink)",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Int.Input("block_number", default=3, min=1, max=32, step=1),
                io.Float.Input("downscale_factor", default=2.0, min=0.1, max=9.0, step=0.001),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.35, min=0.0, max=1.0, step=0.001),
                io.Boolean.Input("downscale_after_skip", default=True),
                io.Combo.Input("downscale_method", options=cls.UPSCALE_METHODS),
                io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    CATEGORY = "model_patches/unet"

    def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method):
    @classmethod
    def execute(cls, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method) -> io.NodeOutput:
        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)
@@ -41,13 +50,21 @@ class PatchModelAddDownscale:
        else:
            m.set_model_input_block_patch(input_block_patch)
        m.set_model_output_block_patch(output_block_patch)
        return (m, )
        return io.NodeOutput(m)

NODE_CLASS_MAPPINGS = {
    "PatchModelAddDownscale": PatchModelAddDownscale,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)",
"PatchModelAddDownscale": "",
|
||||
}

class ModelDownscaleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            PatchModelAddDownscale,
        ]


async def comfy_entrypoint() -> ModelDownscaleExtension:
    return ModelDownscaleExtension()

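[Editor's note] Two mechanical rewrites run through every node in these commits: the entry point becomes a stateless classmethod (per-instance state such as self.output_dir or self.device disappears), and one-element tuple returns become io.NodeOutput. A sketch of the before/after shape, excerpted from the pattern above rather than a runnable module:

    # Old v1 style: instance method, tuple return matching RETURN_TYPES order.
    def patch(self, model):
        m = model.clone()
        return (m, )

    # New v3 style: stateless classmethod, outputs wrapped in io.NodeOutput
    # in the order declared by outputs=[...] in the schema.
    @classmethod
    def execute(cls, model) -> io.NodeOutput:
        m = model.clone()
        return io.NodeOutput(m)
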
@@ -3,64 +3,83 @@ import comfy.sd
import comfy.model_management
import nodes
import torch
import comfy_extras.nodes_slg
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
from comfy_extras.nodes_slg import SkipLayerGuidanceDiT


class TripleCLIPLoader:
class TripleCLIPLoader(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), "clip_name3": (folder_paths.get_filename_list("text_encoders"), )
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"
    def define_schema(cls):
        return io.Schema(
            node_id="TripleCLIPLoader",
            category="advanced/loaders",
            description="[Recipes]\n\nsd3: clip-l, clip-g, t5",
            inputs=[
                io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")),
            ],
            outputs=[
                io.Clip.Output(),
            ],
        )

    CATEGORY = "advanced/loaders"

    DESCRIPTION = "[Recipes]\n\nsd3: clip-l, clip-g, t5"

    def load_clip(self, clip_name1, clip_name2, clip_name3):
    @classmethod
    def execute(cls, clip_name1, clip_name2, clip_name3) -> io.NodeOutput:
        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
        clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)
        return io.NodeOutput(clip)

    load_clip = execute # TODO: remove


class EmptySD3LatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()
class EmptySD3LatentImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptySD3LatentImage",
            category="latent/sd3",
            inputs=[
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "height": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"
    def execute(cls, width, height, batch_size=1) -> io.NodeOutput:
        latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples":latent})

    CATEGORY = "latent/sd3"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )
    generate = execute # TODO: remove


class CLIPTextEncodeSD3:
class CLIPTextEncodeSD3(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "clip_g": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "empty_padding": (["none", "empty_prompt"], )
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeSD3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
                io.Combo.Input("empty_padding", options=["none", "empty_prompt"]),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, clip_l, clip_g, t5xxl, empty_padding):
    @classmethod
    def execute(cls, clip, clip_l, clip_g, t5xxl, empty_padding) -> io.NodeOutput:
        no_padding = empty_padding == "none"

        tokens = clip.tokenize(clip_g)
@@ -82,57 +101,112 @@ class CLIPTextEncodeSD3:
                tokens["l"] += empty["l"]
            while len(tokens["l"]) > len(tokens["g"]):
                tokens["g"] += empty["g"]
        return (clip.encode_from_tokens_scheduled(tokens), )
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))

    encode = execute # TODO: remove


class ControlNetApplySD3(nodes.ControlNetApplyAdvanced):
class ControlNetApplySD3(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "vae": ("VAE", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    CATEGORY = "conditioning/controlnet"
    DEPRECATED = True
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="ControlNetApplySD3",
            display_name="Apply Controlnet with VAE",
            category="conditioning/controlnet",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.ControlNet.Input("control_net"),
                io.Vae.Input("vae"),
                io.Image.Input("image"),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
            is_deprecated=True,
        )

    @classmethod
    def execute(cls, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None) -> io.NodeOutput:
        if strength == 0:
            return io.NodeOutput(positive, negative)

        control_hint = image.movedim(-1, 1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent),
                                                             vae=vae, extra_concat=[])
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return io.NodeOutput(out[0], out[1])

    apply_controlnet = execute # TODO: remove


class SkipLayerGuidanceSD3(comfy_extras.nodes_slg.SkipLayerGuidanceDiT):
class SkipLayerGuidanceSD3(io.ComfyNode):
    '''
    Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
    Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
    Experimental implementation by Dango233@StabilityAI.
    '''

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}),
                             "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "skip_guidance_sd3"
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceSD3",
            category="advanced/guidance",
            description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
            inputs=[
                io.Model.Input("model"),
                io.String.Input("layers", default="7, 8, 9", multiline=False),
                io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1),
                io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Model.Output(),
            ],
            is_experimental=True,
        )

    CATEGORY = "advanced/guidance"
    @classmethod
    def execute(cls, model, layers, scale, start_percent, end_percent) -> io.NodeOutput:
        return SkipLayerGuidanceDiT().execute(model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers)

    def skip_guidance_sd3(self, model, layers, scale, start_percent, end_percent):
        return self.skip_guidance(model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers)
    skip_guidance_sd3 = execute # TODO: remove


NODE_CLASS_MAPPINGS = {
    "TripleCLIPLoader": TripleCLIPLoader,
    "EmptySD3LatentImage": EmptySD3LatentImage,
    "CLIPTextEncodeSD3": CLIPTextEncodeSD3,
    "ControlNetApplySD3": ControlNetApplySD3,
    "SkipLayerGuidanceSD3": SkipLayerGuidanceSD3,
}
class SD3Extension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            TripleCLIPLoader,
            EmptySD3LatentImage,
            CLIPTextEncodeSD3,
            ControlNetApplySD3,
            SkipLayerGuidanceSD3,
        ]

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "ControlNetApplySD3": "Apply Controlnet with VAE",
}

async def comfy_entrypoint() -> SD3Extension:
    return SD3Extension()

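[Editor's note] Every converted node keeps a class-level alias such as `load_clip = execute  # TODO: remove`, so code that still calls the old FUNCTION name resolves to the new classmethod until the alias is deleted. A self-contained sketch of why the alias suffices (Node is a hypothetical stand-in; real nodes subclass io.ComfyNode):

    class Node:  # hypothetical stand-in for TripleCLIPLoader etc.
        @classmethod
        def execute(cls):
            return "result"

        load_clip = execute  # legacy alias: the old entry-point name still works

    assert Node.load_clip() == "result"  # resolves to the same classmethod
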
@@ -1,33 +1,40 @@
import comfy.model_patcher
import comfy.samplers
import re
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


class SkipLayerGuidanceDiT:
class SkipLayerGuidanceDiT(io.ComfyNode):
    '''
    Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
    Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
    Original experimental implementation for SD3 by Dango233@StabilityAI.
    '''

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}),
                             "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "rescaling_scale": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "skip_guidance"
    EXPERIMENTAL = True
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceDiT",
            category="advanced/guidance",
            description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.String.Input("double_layers", default="7, 8, 9"),
                io.String.Input("single_layers", default="7, 8, 9"),
                io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1),
                io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001),
                io.Float.Input("rescaling_scale", default=0.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    DESCRIPTION = "Generic version of SkipLayerGuidance node that can be used on every DiT model."

    CATEGORY = "advanced/guidance"

    def skip_guidance(self, model, scale, start_percent, end_percent, double_layers="", single_layers="", rescaling_scale=0):
    @classmethod
    def execute(cls, model, scale, start_percent, end_percent, double_layers="", single_layers="", rescaling_scale=0) -> io.NodeOutput:
        # check if layer is comma separated integers
        def skip(args, extra_args):
            return args
@@ -43,7 +50,7 @@ class SkipLayerGuidanceDiT:
        single_layers = [int(i) for i in single_layers]

        if len(double_layers) == 0 and len(single_layers) == 0:
            return (model, )
            return io.NodeOutput(model)

        def post_cfg_function(args):
            model = args["model"]
@@ -76,29 +83,36 @@ class SkipLayerGuidanceDiT:
        m = model.clone()
        m.set_model_sampler_post_cfg_function(post_cfg_function)

        return (m, )
        return io.NodeOutput(m)

class SkipLayerGuidanceDiTSimple:
    skip_guidance = execute # TODO: remove


class SkipLayerGuidanceDiTSimple(io.ComfyNode):
    '''
    Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.
    '''
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "skip_guidance"
    EXPERIMENTAL = True
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceDiTSimple",
            category="advanced/guidance",
            description="Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.String.Input("double_layers", default="7, 8, 9"),
                io.String.Input("single_layers", default="7, 8, 9"),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    DESCRIPTION = "Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass."

    CATEGORY = "advanced/guidance"

    def skip_guidance(self, model, start_percent, end_percent, double_layers="", single_layers=""):
    @classmethod
    def execute(cls, model, start_percent, end_percent, double_layers="", single_layers="") -> io.NodeOutput:
        def skip(args, extra_args):
            return args

@@ -113,7 +127,7 @@ class SkipLayerGuidanceDiTSimple:
        single_layers = [int(i) for i in single_layers]

        if len(double_layers) == 0 and len(single_layers) == 0:
            return (model, )
            return io.NodeOutput(model)

        def calc_cond_batch_function(args):
            x = args["input"]
@@ -144,9 +158,19 @@ class SkipLayerGuidanceDiTSimple:
        m = model.clone()
        m.set_model_sampler_calc_cond_batch_function(calc_cond_batch_function)

        return (m, )
        return io.NodeOutput(m)

NODE_CLASS_MAPPINGS = {
    "SkipLayerGuidanceDiT": SkipLayerGuidanceDiT,
    "SkipLayerGuidanceDiTSimple": SkipLayerGuidanceDiTSimple,
}
    skip_guidance = execute # TODO: remove


class SkipLayerGuidanceExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            SkipLayerGuidanceDiT,
            SkipLayerGuidanceDiTSimple,
        ]


async def comfy_entrypoint() -> SkipLayerGuidanceExtension:
    return SkipLayerGuidanceExtension()

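[Editor's note] Both skip-layer nodes take their layer lists as strings like "7, 8, 9", parse them into integers before patching, and return the model untouched when both lists are empty. A minimal, ComfyUI-independent sketch of that parsing step:

    def parse_layers(spec: str) -> list[int]:
        # "7, 8, 9" -> [7, 8, 9]; an empty or whitespace-only string yields [].
        return [int(part) for part in spec.split(",") if part.strip()]

    assert parse_layers("7, 8, 9") == [7, 8, 9]
    assert parse_layers("") == []
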
@@ -4,6 +4,8 @@ from comfy import model_management
import torch
import comfy.utils
import folder_paths
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

try:
    from spandrel_extra_arches import EXTRA_REGISTRY
@@ -13,17 +15,23 @@ try:
except:
    pass

class UpscaleModelLoader:
class UpscaleModelLoader(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ),
                              }}
    RETURN_TYPES = ("UPSCALE_MODEL",)
    FUNCTION = "load_model"
    def define_schema(cls):
        return io.Schema(
            node_id="UpscaleModelLoader",
            display_name="Load Upscale Model",
            category="loaders",
            inputs=[
                io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")),
            ],
            outputs=[
                io.UpscaleModel.Output(),
            ],
        )

    CATEGORY = "loaders"

    def load_model(self, model_name):
    @classmethod
    def execute(cls, model_name) -> io.NodeOutput:
        model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name)
        sd = comfy.utils.load_torch_file(model_path, safe_load=True)
        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
@@ -33,21 +41,29 @@ class UpscaleModelLoader:
        if not isinstance(out, ImageModelDescriptor):
            raise Exception("Upscale model must be a single-image model.")

        return (out, )
        return io.NodeOutput(out)

    load_model = execute # TODO: remove


class ImageUpscaleWithModel:
class ImageUpscaleWithModel(io.ComfyNode):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "upscale_model": ("UPSCALE_MODEL",),
                              "image": ("IMAGE",),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"
    def define_schema(cls):
        return io.Schema(
            node_id="ImageUpscaleWithModel",
            display_name="Upscale Image (using Model)",
            category="image/upscaling",
            inputs=[
                io.UpscaleModel.Input("upscale_model"),
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    CATEGORY = "image/upscaling"

    def upscale(self, upscale_model, image):
    @classmethod
    def execute(cls, upscale_model, image) -> io.NodeOutput:
        device = model_management.get_torch_device()

        memory_required = model_management.module_size(upscale_model.model)
@@ -75,9 +91,19 @@ class ImageUpscaleWithModel:

        upscale_model.to("cpu")
        s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0)
        return (s,)
        return io.NodeOutput(s)

NODE_CLASS_MAPPINGS = {
    "UpscaleModelLoader": UpscaleModelLoader,
    "ImageUpscaleWithModel": ImageUpscaleWithModel
}
    upscale = execute # TODO: remove


class UpscaleModelExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            UpscaleModelLoader,
            ImageUpscaleWithModel,
        ]


async def comfy_entrypoint() -> UpscaleModelExtension:
    return UpscaleModelExtension()

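[Editor's note] The elided body of ImageUpscaleWithModel.execute (the `@@ -75,9 +91,19` hunk) keeps ComfyUI's tile-and-retry upscaling: the image is processed in tiles, and on out-of-memory the tile size is halved and the pass retried. A paraphrased sketch of that loop, assuming comfy.utils.tiled_scale and model_management.OOM_EXCEPTION behave as in current ComfyUI; the exact hunk may differ:

    import comfy.utils
    from comfy import model_management

    tile = 512
    oom = True
    while oom:
        try:
            # Upscale tile by tile so large images fit in VRAM.
            s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a),
                                        tile_x=tile, tile_y=tile, overlap=32,
                                        upscale_amount=upscale_model.scale)
            oom = False
        except model_management.OOM_EXCEPTION as e:
            tile //= 2  # halve the tile size and retry on out-of-memory
            if tile < 128:
                raise e
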
nodes.py
@@ -2027,7 +2027,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    "UNETLoader": "Load Diffusion Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
@@ -2065,7 +2064,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
    "LoadImageOutput": "Load Image (from Outputs)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",

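[Editor's note] The two nodes.py hunks above delete the "UpscaleModelLoader" and "ImageUpscaleWithModel" entries because display names now travel with each node's schema rather than a central dict. In schema form, as seen in the upscale-model diff (keyword arguments as used throughout these commits):

    io.Schema(
        node_id="UpscaleModelLoader",
        display_name="Load Upscale Model",  # formerly an entry in NODE_DISPLAY_NAME_MAPPINGS
        category="loaders",
    )
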
@@ -61,7 +61,6 @@ messages_control.disable = [
    # next warnings should be fixed in future
    "bad-classmethod-argument", # Class method should have 'cls' as first argument
    "wrong-import-order", # Standard imports should be placed before third party imports
    "logging-fstring-interpolation", # Use lazy % formatting in logging functions
    "ungrouped-imports",
    "unnecessary-pass",
    "unnecessary-lambda-assignment",