Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-04-15 20:21:39 +00:00

Compare commits: pysssss/no...glsl-cloud (28 commits)

| SHA1 |
|---|
| 5332ca60e4 |
| 40b247f046 |
| c2d229a786 |
| 3c40ee0f02 |
| 43034b6881 |
| bb048d4aaa |
| 7c1f02d1fa |
| 292a5918f4 |
| 0050b66a0b |
| 0c313f5293 |
| 1fcf9dca18 |
| 3b790d24d6 |
| 92b2b7198a |
| 309c3e4ec0 |
| 23591d4388 |
| c3d07bec6d |
| 59b955ff54 |
| 1263d6fe88 |
| 23572c6314 |
| d809ef8fb1 |
| a4317314d2 |
| aaea976f36 |
| cee092213e |
| 3da0e9c367 |
| 9fa8202620 |
| b4438c9baf |
| cc30293d65 |
| 866d863128 |
@@ -1,105 +0,0 @@
from __future__ import annotations

from aiohttp import web

from typing import TYPE_CHECKING, TypedDict
if TYPE_CHECKING:
    from comfy_api.latest._io_public import NodeReplace

from comfy_execution.graph_utils import is_link
import nodes


class NodeStruct(TypedDict):
    inputs: dict[str, str | int | float | bool | tuple[str, int]]
    class_type: str
    _meta: dict[str, str]


def copy_node_struct(node_struct: NodeStruct, empty_inputs: bool = False) -> NodeStruct:
    new_node_struct = node_struct.copy()
    if empty_inputs:
        new_node_struct["inputs"] = {}
    else:
        new_node_struct["inputs"] = node_struct["inputs"].copy()
    new_node_struct["_meta"] = node_struct["_meta"].copy()
    return new_node_struct


class NodeReplaceManager:
    """Manages node replacement registrations."""

    def __init__(self):
        self._replacements: dict[str, list[NodeReplace]] = {}

    def register(self, node_replace: NodeReplace):
        """Register a node replacement mapping."""
        self._replacements.setdefault(node_replace.old_node_id, []).append(node_replace)

    def get_replacement(self, old_node_id: str) -> list[NodeReplace] | None:
        """Get replacements for an old node ID."""
        return self._replacements.get(old_node_id)

    def has_replacement(self, old_node_id: str) -> bool:
        """Check if a replacement exists for an old node ID."""
        return old_node_id in self._replacements

    def apply_replacements(self, prompt: dict[str, NodeStruct]):
        connections: dict[str, list[tuple[str, str, int]]] = {}
        need_replacement: set[str] = set()
        for node_number, node_struct in prompt.items():
            class_type = node_struct["class_type"]
            # needs replacement if not in NODE_CLASS_MAPPINGS and a replacement exists
            if class_type not in nodes.NODE_CLASS_MAPPINGS.keys() and self.has_replacement(class_type):
                need_replacement.add(node_number)
            # keep track of connections
            for input_id, input_value in node_struct["inputs"].items():
                if is_link(input_value):
                    conn_number = input_value[0]
                    connections.setdefault(conn_number, []).append((node_number, input_id, input_value[1]))
        for node_number in need_replacement:
            node_struct = prompt[node_number]
            class_type = node_struct["class_type"]
            replacements = self.get_replacement(class_type)
            if replacements is None:
                continue
            # just use the first replacement
            replacement = replacements[0]
            new_node_id = replacement.new_node_id
            # if the replacement is not a valid node, skip it; replacing would only cause confusion
            if new_node_id not in nodes.NODE_CLASS_MAPPINGS.keys():
                continue
            # first, replace node id (class_type)
            new_node_struct = copy_node_struct(node_struct, empty_inputs=True)
            new_node_struct["class_type"] = new_node_id
            # TODO: consider replacing display_name in _meta as well for error reporting purposes; would need to query node schema
            # second, replace inputs
            if replacement.input_mapping is not None:
                for input_map in replacement.input_mapping:
                    if "set_value" in input_map:
                        new_node_struct["inputs"][input_map["new_id"]] = input_map["set_value"]
                    elif "old_id" in input_map:
                        new_node_struct["inputs"][input_map["new_id"]] = node_struct["inputs"][input_map["old_id"]]
            # finalize input replacement
            prompt[node_number] = new_node_struct
            # third, replace outputs
            if replacement.output_mapping is not None:
                # re-mapping outputs requires changing the input values of nodes that receive connections from this one
                if node_number in connections:
                    for conns in connections[node_number]:
                        conn_node_number, conn_input_id, old_output_idx = conns
                        for output_map in replacement.output_mapping:
                            if output_map["old_idx"] == old_output_idx:
                                new_output_idx = output_map["new_idx"]
                                previous_input = prompt[conn_node_number]["inputs"][conn_input_id]
                                previous_input[1] = new_output_idx

    def as_dict(self):
        """Serialize all replacements to dict."""
        return {
            k: [v.as_dict() for v in v_list]
            for k, v_list in self._replacements.items()
        }

    def add_routes(self, routes):
        @routes.get("/node_replacements")
        async def get_node_replacements(request):
            return web.json_response(self.as_dict())
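For orientation on what is being deleted: a minimal sketch of how `apply_replacements` rewrites an API-format prompt. The `NodeReplaceStandIn` class and the node names (`OldBlur`, `ImageBlur`) are hypothetical; the real `NodeReplace` lives in `comfy_api.latest._io_public`.

```python
from dataclasses import dataclass

@dataclass
class NodeReplaceStandIn:
    """Hypothetical stand-in with the fields NodeReplaceManager reads."""
    old_node_id: str
    new_node_id: str
    input_mapping: list | None = None
    output_mapping: list | None = None

# A prompt whose node "2" uses a class_type that no longer exists:
prompt = {
    "1": {"class_type": "LoadImage", "inputs": {"image": "x.png"}, "_meta": {}},
    "2": {"class_type": "OldBlur",  # hypothetical removed node
          "inputs": {"img": ["1", 0], "strength": 5.0}, "_meta": {}},
}

replacement = NodeReplaceStandIn(
    old_node_id="OldBlur",
    new_node_id="ImageBlur",  # hypothetical registered successor
    input_mapping=[
        {"new_id": "image", "old_id": "img"},  # carry an input across
        {"new_id": "radius", "set_value": 3},  # pin a brand-new input
    ],
)

# After manager.register(replacement) and manager.apply_replacements(prompt),
# assuming "ImageBlur" is in nodes.NODE_CLASS_MAPPINGS, node "2" becomes:
# {"class_type": "ImageBlur", "inputs": {"image": ["1", 0], "radius": 3}, "_meta": {}}
```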
blueprints/.glsl/Brightness_and_Contrast_1.frag (new file, 44 lines)
@@ -0,0 +1,44 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // Brightness slider -100..100
uniform float u_float1; // Contrast slider -100..100

in vec2 v_texCoord;
out vec4 fragColor;

const float MID_GRAY = 0.18; // 18% reflectance

// sRGB gamma 2.2 approximation
vec3 srgbToLinear(vec3 c) {
    return pow(max(c, 0.0), vec3(2.2));
}

vec3 linearToSrgb(vec3 c) {
    return pow(max(c, 0.0), vec3(1.0/2.2));
}

float mapBrightness(float b) {
    return clamp(b / 100.0, -1.0, 1.0);
}

float mapContrast(float c) {
    return clamp(c / 100.0 + 1.0, 0.0, 2.0);
}

void main() {
    vec4 orig = texture(u_image0, v_texCoord);

    float brightness = mapBrightness(u_float0);
    float contrast = mapContrast(u_float1);

    vec3 lin = srgbToLinear(orig.rgb);

    lin = (lin - MID_GRAY) * contrast + brightness + MID_GRAY;

    // Convert back to sRGB
    vec3 result = linearToSrgb(clamp(lin, 0.0, 1.0));

    fragColor = vec4(result, orig.a);
}
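Not part of the commit: the same pivoted contrast math in Python, as a sanity check that contrast leaves 18% gray untouched.

```python
MID_GRAY = 0.18  # 18% reflectance, the contrast pivot

def brightness_contrast(c_srgb, brightness_slider, contrast_slider):
    """Per-channel math of the shader above, assuming input in [0, 1] sRGB."""
    lin = max(c_srgb, 0.0) ** 2.2                          # srgbToLinear
    b = max(-1.0, min(1.0, brightness_slider / 100.0))     # mapBrightness
    k = max(0.0, min(2.0, contrast_slider / 100.0 + 1.0))  # mapContrast
    lin = (lin - MID_GRAY) * k + b + MID_GRAY
    return min(max(lin, 0.0), 1.0) ** (1.0 / 2.2)          # linearToSrgb

# At the pivot, even maximum contrast is a no-op:
mid = MID_GRAY ** (1.0 / 2.2)
assert abs(brightness_contrast(mid, 0.0, 100.0) - mid) < 1e-6
```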
blueprints/.glsl/Chromatic_Aberration_16.frag (new file, 72 lines)
@@ -0,0 +1,72 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform int u_int0;     // Mode
uniform float u_float0; // Amount (0 to 100)

in vec2 v_texCoord;
out vec4 fragColor;

const int MODE_LINEAR = 0;
const int MODE_RADIAL = 1;
const int MODE_BARREL = 2;
const int MODE_SWIRL = 3;
const int MODE_DIAGONAL = 4;

const float AMOUNT_SCALE = 0.0005;
const float RADIAL_MULT = 4.0;
const float BARREL_MULT = 8.0;
const float INV_SQRT2 = 0.70710678118;

void main() {
    vec2 uv = v_texCoord;
    vec4 original = texture(u_image0, uv);

    float amount = u_float0 * AMOUNT_SCALE;

    if (amount < 0.000001) {
        fragColor = original;
        return;
    }

    // Aspect-corrected coordinates for circular effects
    float aspect = u_resolution.x / u_resolution.y;
    vec2 centered = uv - 0.5;
    vec2 corrected = vec2(centered.x * aspect, centered.y);
    float r = length(corrected);
    vec2 dir = r > 0.0001 ? corrected / r : vec2(0.0);
    vec2 offset = vec2(0.0);

    if (u_int0 == MODE_LINEAR) {
        // Horizontal shift (no aspect correction needed)
        offset = vec2(amount, 0.0);
    }
    else if (u_int0 == MODE_RADIAL) {
        // Outward from center, stronger at edges
        offset = dir * r * amount * RADIAL_MULT;
        offset.x /= aspect; // Convert back to UV space
    }
    else if (u_int0 == MODE_BARREL) {
        // Lens distortion simulation (r² falloff)
        offset = dir * r * r * amount * BARREL_MULT;
        offset.x /= aspect; // Convert back to UV space
    }
    else if (u_int0 == MODE_SWIRL) {
        // Perpendicular to radial (rotational aberration)
        vec2 perp = vec2(-dir.y, dir.x);
        offset = perp * r * amount * RADIAL_MULT;
        offset.x /= aspect; // Convert back to UV space
    }
    else if (u_int0 == MODE_DIAGONAL) {
        // 45° offset (no aspect correction needed)
        offset = vec2(amount, amount) * INV_SQRT2;
    }

    float red = texture(u_image0, uv + offset).r;
    float green = original.g;
    float blue = texture(u_image0, uv - offset).b;

    fragColor = vec4(red, green, blue, original.a);
}
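Not part of the commit: a small Python check of the radial mode's offset math, using the shader's constants.

```python
import math

AMOUNT_SCALE, RADIAL_MULT = 0.0005, 4.0

def radial_offset(uv, resolution, amount_slider):
    """Offset applied to the red channel in MODE_RADIAL (blue gets -offset)."""
    aspect = resolution[0] / resolution[1]
    cx, cy = (uv[0] - 0.5) * aspect, uv[1] - 0.5  # aspect-corrected, centered
    r = math.hypot(cx, cy)
    if r <= 1e-4:
        return (0.0, 0.0)
    amount = amount_slider * AMOUNT_SCALE
    ox = (cx / r) * r * amount * RADIAL_MULT / aspect  # back to UV space
    oy = (cy / r) * r * amount * RADIAL_MULT
    return (ox, oy)

# The offset grows linearly with distance from center: zero at (0.5, 0.5),
# strongest in the corners.
print(radial_offset((0.9, 0.9), (1920, 1080), 50.0))
```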
blueprints/.glsl/Color_Adjustment_15.frag (new file, 78 lines)
@@ -0,0 +1,78 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // temperature (-100 to 100)
uniform float u_float1; // tint (-100 to 100)
uniform float u_float2; // vibrance (-100 to 100)
uniform float u_float3; // saturation (-100 to 100)

in vec2 v_texCoord;
out vec4 fragColor;

const float INPUT_SCALE = 0.01;
const float TEMP_TINT_PRIMARY = 0.3;
const float TEMP_TINT_SECONDARY = 0.15;
const float VIBRANCE_BOOST = 2.0;
const float SATURATION_BOOST = 2.0;
const float SKIN_PROTECTION = 0.5;
const float EPSILON = 0.001;
const vec3 LUMA_WEIGHTS = vec3(0.299, 0.587, 0.114);

void main() {
    vec4 tex = texture(u_image0, v_texCoord);
    vec3 color = tex.rgb;

    // Scale inputs: -100/100 → -1/1
    float temperature = u_float0 * INPUT_SCALE;
    float tint = u_float1 * INPUT_SCALE;
    float vibrance = u_float2 * INPUT_SCALE;
    float saturation = u_float3 * INPUT_SCALE;

    // Temperature (warm/cool): positive = warm, negative = cool
    color.r += temperature * TEMP_TINT_PRIMARY;
    color.b -= temperature * TEMP_TINT_PRIMARY;

    // Tint (green/magenta): positive = green, negative = magenta
    color.g += tint * TEMP_TINT_PRIMARY;
    color.r -= tint * TEMP_TINT_SECONDARY;
    color.b -= tint * TEMP_TINT_SECONDARY;

    // Single clamp after temperature/tint
    color = clamp(color, 0.0, 1.0);

    // Vibrance with skin protection
    if (vibrance != 0.0) {
        float maxC = max(color.r, max(color.g, color.b));
        float minC = min(color.r, min(color.g, color.b));
        float sat = maxC - minC;
        float gray = dot(color, LUMA_WEIGHTS);

        if (vibrance < 0.0) {
            // Desaturate: -100 → gray
            color = mix(vec3(gray), color, 1.0 + vibrance);
        } else {
            // Boost less saturated colors more
            float vibranceAmt = vibrance * (1.0 - sat);

            // Branchless skin tone protection
            float isWarmTone = step(color.b, color.g) * step(color.g, color.r);
            float warmth = (color.r - color.b) / max(maxC, EPSILON);
            float skinTone = isWarmTone * warmth * sat * (1.0 - sat);
            vibranceAmt *= (1.0 - skinTone * SKIN_PROTECTION);

            color = mix(vec3(gray), color, 1.0 + vibranceAmt * VIBRANCE_BOOST);
        }
    }

    // Saturation
    if (saturation != 0.0) {
        float gray = dot(color, LUMA_WEIGHTS);
        float satMix = saturation < 0.0
            ? 1.0 + saturation                     // -100 → gray
            : 1.0 + saturation * SATURATION_BOOST; // +100 → 3x boost
        color = mix(vec3(gray), color, satMix);
    }

    fragColor = vec4(clamp(color, 0.0, 1.0), tex.a);
}
blueprints/.glsl/Edge-Preserving_Blur_128.frag (new file, 94 lines)
@@ -0,0 +1,94 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // Blur radius (0–20, default ~5)
uniform float u_float1; // Edge threshold (0–100, default ~30)
uniform int u_int0;     // Step size (0/1 = every pixel, 2+ = skip pixels)

in vec2 v_texCoord;
out vec4 fragColor;

const int MAX_RADIUS = 20;
const float EPSILON = 0.0001;

// Perceptual luminance
float getLuminance(vec3 rgb) {
    return dot(rgb, vec3(0.299, 0.587, 0.114));
}

vec4 bilateralFilter(vec2 uv, vec2 texelSize, int radius,
                     float sigmaSpatial, float sigmaColor)
{
    vec4 center = texture(u_image0, uv);
    vec3 centerRGB = center.rgb;

    float invSpatial2 = -0.5 / (sigmaSpatial * sigmaSpatial);
    float invColor2 = -0.5 / (sigmaColor * sigmaColor + EPSILON);

    vec3 sumRGB = vec3(0.0);
    float sumWeight = 0.0;

    int step = max(u_int0, 1);
    float radius2 = float(radius * radius);

    for (int dy = -MAX_RADIUS; dy <= MAX_RADIUS; dy++) {
        if (dy < -radius || dy > radius) continue;
        if (abs(dy) % step != 0) continue;

        for (int dx = -MAX_RADIUS; dx <= MAX_RADIUS; dx++) {
            if (dx < -radius || dx > radius) continue;
            if (abs(dx) % step != 0) continue;

            vec2 offset = vec2(float(dx), float(dy));
            float dist2 = dot(offset, offset);
            if (dist2 > radius2) continue;

            vec3 sampleRGB = texture(u_image0, uv + offset * texelSize).rgb;

            // Spatial Gaussian
            float spatialWeight = exp(dist2 * invSpatial2);

            // Perceptual color distance (weighted RGB)
            vec3 diff = sampleRGB - centerRGB;
            float colorDist = dot(diff * diff, vec3(0.299, 0.587, 0.114));
            float colorWeight = exp(colorDist * invColor2);

            float w = spatialWeight * colorWeight;
            sumRGB += sampleRGB * w;
            sumWeight += w;
        }
    }

    vec3 resultRGB = sumRGB / max(sumWeight, EPSILON);
    return vec4(resultRGB, center.a); // preserve center alpha
}

void main() {
    vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));

    float radiusF = clamp(u_float0, 0.0, float(MAX_RADIUS));
    int radius = int(radiusF + 0.5);

    if (radius == 0) {
        fragColor = texture(u_image0, v_texCoord);
        return;
    }

    // Edge threshold → color sigma
    // Squared curve for better low-end control
    float t = clamp(u_float1, 0.0, 100.0) / 100.0;
    t *= t;
    float sigmaColor = mix(0.01, 0.5, t);

    // Spatial sigma tied to radius
    float sigmaSpatial = max(radiusF * 0.75, 0.5);

    fragColor = bilateralFilter(
        v_texCoord,
        texelSize,
        radius,
        sigmaSpatial,
        sigmaColor
    );
}
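Not part of the commit: the bilateral weighting in plain Python, to make the two Gaussians explicit.

```python
import math

def bilateral_weight(dx, dy, center_rgb, sample_rgb, sigma_spatial, sigma_color):
    """Weight of one tap = spatial Gaussian * color (range) Gaussian."""
    dist2 = dx * dx + dy * dy
    spatial = math.exp(-0.5 * dist2 / (sigma_spatial ** 2))
    # Luma-weighted squared color distance, as in the shader
    luma = (0.299, 0.587, 0.114)
    color_dist = sum(w * (s - c) ** 2
                     for w, s, c in zip(luma, sample_rgb, center_rgb))
    rng = math.exp(-0.5 * color_dist / (sigma_color ** 2 + 1e-4))
    return spatial * rng

# A tap across a hard edge (black vs. white) gets almost no weight,
# so the edge is preserved; a tap on flat color keeps nearly full weight:
print(bilateral_weight(1, 0, (0, 0, 0), (1, 1, 1), 3.75, 0.1))  # ~0
print(bilateral_weight(1, 0, (0, 0, 0), (0, 0, 0), 3.75, 0.1))  # ~0.965
```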
blueprints/.glsl/Film_Grain_15.frag (new file, 124 lines)
@@ -0,0 +1,124 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform float u_float0; // grain amount   [0.0 – 1.0] typical: 0.2–0.8
uniform float u_float1; // grain size     [0.3 – 3.0] lower = finer grain
uniform float u_float2; // color amount   [0.0 – 1.0] 0 = monochrome, 1 = RGB grain
uniform float u_float3; // luminance bias [0.0 – 1.0] 0 = uniform, 1 = shadows only
uniform int u_int0;     // noise mode     [0 or 1]    0 = smooth, 1 = grainy

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;

// High-quality integer hash (pcg-like)
uint pcg(uint v) {
    uint state = v * 747796405u + 2891336453u;
    uint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
    return (word >> 22u) ^ word;
}

// 2D -> 1D hash input
uint hash2d(uvec2 p) {
    return pcg(p.x + pcg(p.y));
}

// Hash to float [0, 1]
float hashf(uvec2 p) {
    return float(hash2d(p)) / float(0xffffffffu);
}

// Hash to float with offset (for RGB channels)
float hashf(uvec2 p, uint offset) {
    return float(pcg(hash2d(p) + offset)) / float(0xffffffffu);
}

// Convert uniform [0,1] to a roughly Gaussian distribution
// using a simple approximation: the average of multiple samples
float toGaussian(uvec2 p) {
    float sum = hashf(p, 0u) + hashf(p, 1u) + hashf(p, 2u) + hashf(p, 3u);
    return (sum - 2.0) * 0.7; // Centered, scaled
}

float toGaussian(uvec2 p, uint offset) {
    float sum = hashf(p, offset) + hashf(p, offset + 1u)
              + hashf(p, offset + 2u) + hashf(p, offset + 3u);
    return (sum - 2.0) * 0.7;
}

// Smooth noise with better interpolation
float smoothNoise(vec2 p) {
    vec2 i = floor(p);
    vec2 f = fract(p);

    // Quintic interpolation (less banding than cubic)
    f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);

    uvec2 ui = uvec2(i);
    float a = toGaussian(ui);
    float b = toGaussian(ui + uvec2(1u, 0u));
    float c = toGaussian(ui + uvec2(0u, 1u));
    float d = toGaussian(ui + uvec2(1u, 1u));

    return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);
}

float smoothNoise(vec2 p, uint offset) {
    vec2 i = floor(p);
    vec2 f = fract(p);

    f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);

    uvec2 ui = uvec2(i);
    float a = toGaussian(ui, offset);
    float b = toGaussian(ui + uvec2(1u, 0u), offset);
    float c = toGaussian(ui + uvec2(0u, 1u), offset);
    float d = toGaussian(ui + uvec2(1u, 1u), offset);

    return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);
}

void main() {
    vec4 color = texture(u_image0, v_texCoord);

    // Luminance (Rec.709)
    float luma = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722));

    // Grain UV (resolution-independent)
    vec2 grainUV = v_texCoord * u_resolution / max(u_float1, 0.01);
    uvec2 grainPixel = uvec2(grainUV);

    float g;
    vec3 grainRGB;

    if (u_int0 == 1) {
        // Grainy mode: pure hash noise (no interpolation = no banding)
        g = toGaussian(grainPixel);
        grainRGB = vec3(
            toGaussian(grainPixel, 100u),
            toGaussian(grainPixel, 200u),
            toGaussian(grainPixel, 300u)
        );
    } else {
        // Smooth mode: interpolated with quintic curve
        g = smoothNoise(grainUV);
        grainRGB = vec3(
            smoothNoise(grainUV, 100u),
            smoothNoise(grainUV, 200u),
            smoothNoise(grainUV, 300u)
        );
    }

    // Luminance weighting (less grain in highlights)
    float lumWeight = mix(1.0, 1.0 - luma, clamp(u_float3, 0.0, 1.0));

    // Strength
    float strength = u_float0 * 0.15;

    // Color vs monochrome grain
    vec3 grainColor = mix(vec3(g), grainRGB, clamp(u_float2, 0.0, 1.0));

    color.rgb += grainColor * strength * lumWeight;
    fragColor0 = vec4(clamp(color.rgb, 0.0, 1.0), color.a);
}
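Not part of the commit: the PCG-style hash and the uniform-to-Gaussian trick, transcribed to Python for inspection (this follows the offset variant of `hashf`, the one `toGaussian` actually calls).

```python
M32 = 0xFFFFFFFF  # emulate 32-bit unsigned wraparound

def pcg(v):
    state = (v * 747796405 + 2891336453) & M32
    word = (((state >> ((state >> 28) + 4)) ^ state) * 277803737) & M32
    return ((word >> 22) ^ word) & M32

def hash2d(x, y):
    return pcg((x + pcg(y)) & M32)

def hashf(x, y, offset=0):
    # Hash to a uniform float in [0, 1]; offset decorrelates RGB channels
    return pcg((hash2d(x, y) + offset) & M32) / M32

def to_gaussian(x, y):
    # The sum of 4 uniforms is approximately normal (Irwin-Hall);
    # subtracting the mean (2.0) centers it, 0.7 scales it.
    s = sum(hashf(x, y, o) for o in range(4))
    return (s - 2.0) * 0.7

# Grain values are roughly zero-mean and decorrelated per pixel:
print([round(to_gaussian(x, 7), 3) for x in range(5)])
```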
blueprints/.glsl/Glow_30.frag (new file, 133 lines)
@@ -0,0 +1,133 @@
#version 300 es
precision mediump float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform int u_int0;     // Blend mode
uniform int u_int1;     // Color tint
uniform float u_float0; // Intensity
uniform float u_float1; // Radius
uniform float u_float2; // Threshold

in vec2 v_texCoord;
out vec4 fragColor;

const int BLEND_ADD = 0;
const int BLEND_SCREEN = 1;
const int BLEND_SOFT = 2;
const int BLEND_OVERLAY = 3;
const int BLEND_LIGHTEN = 4;

const float GOLDEN_ANGLE = 2.39996323;
const int MAX_SAMPLES = 48;
const vec3 LUMA = vec3(0.299, 0.587, 0.114);

float hash(vec2 p) {
    p = fract(p * vec2(123.34, 456.21));
    p += dot(p, p + 45.32);
    return fract(p.x * p.y);
}

vec3 hexToRgb(int h) {
    return vec3(
        float((h >> 16) & 255),
        float((h >> 8) & 255),
        float(h & 255)
    ) * (1.0 / 255.0);
}

vec3 blend(vec3 base, vec3 glow, int mode) {
    if (mode == BLEND_SCREEN) {
        return 1.0 - (1.0 - base) * (1.0 - glow);
    }
    if (mode == BLEND_SOFT) {
        return mix(
            base - (1.0 - 2.0 * glow) * base * (1.0 - base),
            base + (2.0 * glow - 1.0) * (sqrt(base) - base),
            step(0.5, glow)
        );
    }
    if (mode == BLEND_OVERLAY) {
        return mix(
            2.0 * base * glow,
            1.0 - 2.0 * (1.0 - base) * (1.0 - glow),
            step(0.5, base)
        );
    }
    if (mode == BLEND_LIGHTEN) {
        return max(base, glow);
    }
    return base + glow;
}

void main() {
    vec4 original = texture(u_image0, v_texCoord);

    float intensity = u_float0 * 0.05;
    float radius = u_float1 * u_float1 * 0.012;

    if (intensity < 0.001 || radius < 0.1) {
        fragColor = original;
        return;
    }

    float threshold = 1.0 - u_float2 * 0.01;
    float t0 = threshold - 0.15;
    float t1 = threshold + 0.15;

    vec2 texelSize = 1.0 / u_resolution;
    float radius2 = radius * radius;

    float sampleScale = clamp(radius * 0.75, 0.35, 1.0);
    int samples = int(float(MAX_SAMPLES) * sampleScale);

    float noise = hash(gl_FragCoord.xy);
    float angleOffset = noise * GOLDEN_ANGLE;
    float radiusJitter = 0.85 + noise * 0.3;

    float ca = cos(GOLDEN_ANGLE);
    float sa = sin(GOLDEN_ANGLE);
    vec2 dir = vec2(cos(angleOffset), sin(angleOffset));

    vec3 glow = vec3(0.0);
    float totalWeight = 0.0;

    // Center tap
    float centerMask = smoothstep(t0, t1, dot(original.rgb, LUMA));
    glow += original.rgb * centerMask * 2.0;
    totalWeight += 2.0;

    for (int i = 1; i < MAX_SAMPLES; i++) {
        if (i >= samples) break;

        float fi = float(i);
        float dist = sqrt(fi / float(samples)) * radius * radiusJitter;

        vec2 offset = dir * dist * texelSize;
        vec3 c = texture(u_image0, v_texCoord + offset).rgb;
        float mask = smoothstep(t0, t1, dot(c, LUMA));

        float w = 1.0 - (dist * dist) / (radius2 * 1.5);
        w = max(w, 0.0);
        w *= w;

        glow += c * mask * w;
        totalWeight += w;

        dir = vec2(
            dir.x * ca - dir.y * sa,
            dir.x * sa + dir.y * ca
        );
    }

    glow *= intensity / max(totalWeight, 0.001);

    if (u_int1 > 0) {
        glow *= hexToRgb(u_int1);
    }

    vec3 result = blend(original.rgb, glow, u_int0);
    result += (noise - 0.5) * (1.0 / 255.0);

    fragColor = vec4(clamp(result, 0.0, 1.0), original.a);
}
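Not part of the commit: the golden-angle sample pattern used above, generated in Python. Each tap advances by ~137.5° and moves outward by sqrt(i/N), which spreads taps roughly uniformly over the disc.

```python
import math

GOLDEN_ANGLE = 2.39996323  # ~137.5 degrees

def golden_spiral_taps(n_samples, radius):
    """Sample offsets (in texels) matching the shader's rotation loop."""
    taps = []
    angle = 0.0
    for i in range(1, n_samples):
        dist = math.sqrt(i / n_samples) * radius  # sqrt -> uniform disc density
        taps.append((dist * math.cos(angle), dist * math.sin(angle)))
        angle += GOLDEN_ANGLE
    return taps

# 48 taps over a 10-texel disc; no two taps share an angle, and density
# stays roughly constant from center to rim.
for x, y in golden_spiral_taps(48, 10.0)[:4]:
    print(f"({x:+.2f}, {y:+.2f})")
```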
blueprints/.glsl/Hue_and_Saturation_1.frag (new file, 222 lines)
@@ -0,0 +1,222 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform int u_int0;     // Mode: 0=Master, 1=Reds, 2=Yellows, 3=Greens, 4=Cyans, 5=Blues, 6=Magentas, 7=Colorize
uniform int u_int1;     // Color Space: 0=HSL, 1=HSB/HSV
uniform float u_float0; // Hue (-180 to 180)
uniform float u_float1; // Saturation (-100 to 100)
uniform float u_float2; // Lightness/Brightness (-100 to 100)
uniform float u_float3; // Overlap (0 to 100) - feathering between adjacent color ranges

in vec2 v_texCoord;
out vec4 fragColor;

// Color range modes
const int MODE_MASTER = 0;
const int MODE_RED = 1;
const int MODE_YELLOW = 2;
const int MODE_GREEN = 3;
const int MODE_CYAN = 4;
const int MODE_BLUE = 5;
const int MODE_MAGENTA = 6;
const int MODE_COLORIZE = 7;

// Color space modes
const int COLORSPACE_HSL = 0;
const int COLORSPACE_HSB = 1;

const float EPSILON = 0.0001;

//=============================================================================
// RGB <-> HSL Conversions
//=============================================================================

vec3 rgb2hsl(vec3 c) {
    float maxC = max(max(c.r, c.g), c.b);
    float minC = min(min(c.r, c.g), c.b);
    float delta = maxC - minC;

    float h = 0.0;
    float s = 0.0;
    float l = (maxC + minC) * 0.5;

    if (delta > EPSILON) {
        s = l < 0.5
            ? delta / (maxC + minC)
            : delta / (2.0 - maxC - minC);

        if (maxC == c.r) {
            h = (c.g - c.b) / delta + (c.g < c.b ? 6.0 : 0.0);
        } else if (maxC == c.g) {
            h = (c.b - c.r) / delta + 2.0;
        } else {
            h = (c.r - c.g) / delta + 4.0;
        }
        h /= 6.0;
    }

    return vec3(h, s, l);
}

float hue2rgb(float p, float q, float t) {
    t = fract(t);
    if (t < 1.0/6.0) return p + (q - p) * 6.0 * t;
    if (t < 0.5) return q;
    if (t < 2.0/3.0) return p + (q - p) * (2.0/3.0 - t) * 6.0;
    return p;
}

vec3 hsl2rgb(vec3 hsl) {
    if (hsl.y < EPSILON) return vec3(hsl.z);

    float q = hsl.z < 0.5
        ? hsl.z * (1.0 + hsl.y)
        : hsl.z + hsl.y - hsl.z * hsl.y;
    float p = 2.0 * hsl.z - q;

    return vec3(
        hue2rgb(p, q, hsl.x + 1.0/3.0),
        hue2rgb(p, q, hsl.x),
        hue2rgb(p, q, hsl.x - 1.0/3.0)
    );
}

vec3 rgb2hsb(vec3 c) {
    float maxC = max(max(c.r, c.g), c.b);
    float minC = min(min(c.r, c.g), c.b);
    float delta = maxC - minC;

    float h = 0.0;
    float s = (maxC > EPSILON) ? delta / maxC : 0.0;
    float b = maxC;

    if (delta > EPSILON) {
        if (maxC == c.r) {
            h = (c.g - c.b) / delta + (c.g < c.b ? 6.0 : 0.0);
        } else if (maxC == c.g) {
            h = (c.b - c.r) / delta + 2.0;
        } else {
            h = (c.r - c.g) / delta + 4.0;
        }
        h /= 6.0;
    }

    return vec3(h, s, b);
}

vec3 hsb2rgb(vec3 hsb) {
    vec3 rgb = clamp(abs(mod(hsb.x * 6.0 + vec3(0.0, 4.0, 2.0), 6.0) - 3.0) - 1.0, 0.0, 1.0);
    return hsb.z * mix(vec3(1.0), rgb, hsb.y);
}

//=============================================================================
// Color Range Weight Calculation
//=============================================================================

float hueDistance(float a, float b) {
    float d = abs(a - b);
    return min(d, 1.0 - d);
}

float getHueWeight(float hue, float center, float overlap) {
    float baseWidth = 1.0 / 6.0;
    float feather = baseWidth * overlap;

    float d = hueDistance(hue, center);

    float inner = baseWidth * 0.5;
    float outer = inner + feather;

    return 1.0 - smoothstep(inner, outer, d);
}

float getModeWeight(float hue, int mode, float overlap) {
    if (mode == MODE_MASTER || mode == MODE_COLORIZE) return 1.0;

    if (mode == MODE_RED) {
        return max(
            getHueWeight(hue, 0.0, overlap),
            getHueWeight(hue, 1.0, overlap)
        );
    }

    float center = float(mode - 1) / 6.0;
    return getHueWeight(hue, center, overlap);
}

//=============================================================================
// Adjustment Functions
//=============================================================================

float adjustLightness(float l, float amount) {
    return amount > 0.0
        ? l + (1.0 - l) * amount
        : l + l * amount;
}

float adjustBrightness(float b, float amount) {
    return clamp(b + amount, 0.0, 1.0);
}

float adjustSaturation(float s, float amount) {
    return amount > 0.0
        ? s + (1.0 - s) * amount
        : s + s * amount;
}

vec3 colorize(vec3 rgb, float hue, float sat, float light) {
    float lum = dot(rgb, vec3(0.299, 0.587, 0.114));
    float l = adjustLightness(lum, light);

    vec3 hsl = vec3(fract(hue), clamp(abs(sat), 0.0, 1.0), clamp(l, 0.0, 1.0));
    return hsl2rgb(hsl);
}

//=============================================================================
// Main
//=============================================================================

void main() {
    vec4 original = texture(u_image0, v_texCoord);

    float hueShift = u_float0 / 360.0;    // -180..180 -> -0.5..0.5
    float satAmount = u_float1 / 100.0;   // -100..100 -> -1..1
    float lightAmount = u_float2 / 100.0; // -100..100 -> -1..1
    float overlap = u_float3 / 100.0;     // 0..100 -> 0..1

    vec3 result;

    if (u_int0 == MODE_COLORIZE) {
        result = colorize(original.rgb, hueShift, satAmount, lightAmount);
        fragColor = vec4(result, original.a);
        return;
    }

    vec3 hsx = (u_int1 == COLORSPACE_HSL)
        ? rgb2hsl(original.rgb)
        : rgb2hsb(original.rgb);

    float weight = getModeWeight(hsx.x, u_int0, overlap);

    if (u_int0 != MODE_MASTER && hsx.y < EPSILON) {
        weight = 0.0;
    }

    if (weight > EPSILON) {
        float h = fract(hsx.x + hueShift * weight);
        float s = clamp(adjustSaturation(hsx.y, satAmount * weight), 0.0, 1.0);
        float v = (u_int1 == COLORSPACE_HSL)
            ? clamp(adjustLightness(hsx.z, lightAmount * weight), 0.0, 1.0)
            : clamp(adjustBrightness(hsx.z, lightAmount * weight), 0.0, 1.0);

        vec3 adjusted = vec3(h, s, v);
        result = (u_int1 == COLORSPACE_HSL)
            ? hsl2rgb(adjusted)
            : hsb2rgb(adjusted);
    } else {
        result = original.rgb;
    }

    fragColor = vec4(result, original.a);
}
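Not part of the commit: the range-feathering weight in Python. Each named range is a 60° band whose falloff widens with the overlap slider.

```python
def hue_distance(a, b):
    """Shortest distance between two hues on the [0, 1) wheel."""
    d = abs(a - b)
    return min(d, 1.0 - d)

def smoothstep(e0, e1, x):
    t = min(max((x - e0) / (e1 - e0), 0.0), 1.0)
    return t * t * (3.0 - 2.0 * t)

def hue_weight(hue, center, overlap):
    base_width = 1.0 / 6.0                 # each range covers 60 degrees
    inner = base_width * 0.5               # full weight inside +/-30 degrees
    outer = inner + base_width * overlap   # feathered edge
    return 1.0 - smoothstep(inner, outer, hue_distance(hue, center))

# Yellows are centered at hue 1/6; a pure yellow pixel gets weight 1, and
# a hue 45 degrees away sits halfway down the feather at overlap 0.5:
print(hue_weight(1 / 6, 1 / 6, 0.5))          # 1.0
print(hue_weight(1 / 6 + 0.125, 1 / 6, 0.5))  # 0.5
```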
blueprints/.glsl/Image_Blur_1.frag (new file, 111 lines)
@@ -0,0 +1,111 @@
#version 300 es
#pragma passes 2
precision highp float;

// Blur type constants
const int BLUR_GAUSSIAN = 0;
const int BLUR_BOX = 1;
const int BLUR_RADIAL = 2;

// Radial blur config
const int RADIAL_SAMPLES = 12;
const float RADIAL_STRENGTH = 0.0003;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform int u_int0;     // Blur type (BLUR_GAUSSIAN, BLUR_BOX, BLUR_RADIAL)
uniform float u_float0; // Blur radius/amount
uniform int u_pass;     // Pass index (0 = horizontal, 1 = vertical)

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;

float gaussian(float x, float sigma) {
    return exp(-(x * x) / (2.0 * sigma * sigma));
}

void main() {
    vec2 texelSize = 1.0 / u_resolution;
    float radius = max(u_float0, 0.0);

    // Radial (angular) blur - single pass, not separable
    if (u_int0 == BLUR_RADIAL) {
        // Only execute on the first pass
        if (u_pass > 0) {
            fragColor0 = texture(u_image0, v_texCoord);
            return;
        }

        vec2 center = vec2(0.5);
        vec2 dir = v_texCoord - center;
        float dist = length(dir);

        if (dist < 1e-4) {
            fragColor0 = texture(u_image0, v_texCoord);
            return;
        }

        vec4 sum = vec4(0.0);
        float totalWeight = 0.0;
        float angleStep = radius * RADIAL_STRENGTH;

        dir /= dist;

        float cosStep = cos(angleStep);
        float sinStep = sin(angleStep);

        float negAngle = -float(RADIAL_SAMPLES) * angleStep;
        vec2 rotDir = vec2(
            dir.x * cos(negAngle) - dir.y * sin(negAngle),
            dir.x * sin(negAngle) + dir.y * cos(negAngle)
        );

        for (int i = -RADIAL_SAMPLES; i <= RADIAL_SAMPLES; i++) {
            vec2 uv = center + rotDir * dist;
            float w = 1.0 - abs(float(i)) / float(RADIAL_SAMPLES);
            sum += texture(u_image0, uv) * w;
            totalWeight += w;

            rotDir = vec2(
                rotDir.x * cosStep - rotDir.y * sinStep,
                rotDir.x * sinStep + rotDir.y * cosStep
            );
        }

        fragColor0 = sum / max(totalWeight, 0.001);
        return;
    }

    // Separable Gaussian / Box blur
    int samples = int(ceil(radius));

    if (samples == 0) {
        fragColor0 = texture(u_image0, v_texCoord);
        return;
    }

    // Direction: pass 0 = horizontal, pass 1 = vertical
    vec2 dir = (u_pass == 0) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);

    vec4 color = vec4(0.0);
    float totalWeight = 0.0;
    float sigma = radius / 2.0;

    for (int i = -samples; i <= samples; i++) {
        vec2 offset = dir * float(i) * texelSize;
        vec4 sample_color = texture(u_image0, v_texCoord + offset);

        float weight;
        if (u_int0 == BLUR_GAUSSIAN) {
            weight = gaussian(float(i), sigma);
        } else {
            // BLUR_BOX
            weight = 1.0;
        }

        color += sample_color * weight;
        totalWeight += weight;
    }

    fragColor0 = color / totalWeight;
}
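Not part of the commit: why the Gaussian/box paths run in two passes. A separable blur costs O(2k) taps per pixel instead of O(k²), and the two 1D passes reproduce the full 2D kernel exactly. A NumPy sketch under that assumption:

```python
import numpy as np

def gaussian_kernel(radius, sigma):
    x = np.arange(-radius, radius + 1, dtype=np.float64)
    k = np.exp(-(x * x) / (2.0 * sigma * sigma))
    return k / k.sum()

def separable_blur(img, radius):
    """Two 1D passes (horizontal, then vertical), like u_pass 0 and 1."""
    k = gaussian_kernel(radius, sigma=radius / 2.0)
    h = np.apply_along_axis(lambda r: np.convolve(r, k, mode="same"), 1, img)
    return np.apply_along_axis(lambda c: np.convolve(c, k, mode="same"), 0, h)

img = np.zeros((9, 9))
img[4, 4] = 1.0  # impulse
out = separable_blur(img, radius=2)

# The impulse response is the outer product of the 1D kernel with itself,
# i.e. exactly the full 2D Gaussian:
k = gaussian_kernel(2, 1.0)
assert np.allclose(out[2:7, 2:7], np.outer(k, k), atol=1e-12)
```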
blueprints/.glsl/Image_Channels_23.frag (new file, 19 lines)
@@ -0,0 +1,19 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;
layout(location = 1) out vec4 fragColor1;
layout(location = 2) out vec4 fragColor2;
layout(location = 3) out vec4 fragColor3;

void main() {
    vec4 color = texture(u_image0, v_texCoord);
    // Output each channel as grayscale to separate render targets
    fragColor0 = vec4(vec3(color.r), 1.0); // Red channel
    fragColor1 = vec4(vec3(color.g), 1.0); // Green channel
    fragColor2 = vec4(vec3(color.b), 1.0); // Blue channel
    fragColor3 = vec4(vec3(color.a), 1.0); // Alpha channel
}
blueprints/.glsl/Image_Levels_1.frag (new file, 71 lines)
@@ -0,0 +1,71 @@
#version 300 es
precision highp float;

// Levels Adjustment
// u_int0:   channel (0=RGB, 1=R, 2=G, 3=B)  default: 0
// u_float0: input black (0-255)             default: 0
// u_float1: input white (0-255)             default: 255
// u_float2: gamma (0.01-9.99)               default: 1.0
// u_float3: output black (0-255)            default: 0
// u_float4: output white (0-255)            default: 255

uniform sampler2D u_image0;
uniform int u_int0;
uniform float u_float0;
uniform float u_float1;
uniform float u_float2;
uniform float u_float3;
uniform float u_float4;

in vec2 v_texCoord;
out vec4 fragColor;

vec3 applyLevels(vec3 color, float inBlack, float inWhite, float gamma, float outBlack, float outWhite) {
    float inRange = max(inWhite - inBlack, 0.0001);
    vec3 result = clamp((color - inBlack) / inRange, 0.0, 1.0);
    result = pow(result, vec3(1.0 / gamma));
    result = mix(vec3(outBlack), vec3(outWhite), result);
    return result;
}

float applySingleChannel(float value, float inBlack, float inWhite, float gamma, float outBlack, float outWhite) {
    float inRange = max(inWhite - inBlack, 0.0001);
    float result = clamp((value - inBlack) / inRange, 0.0, 1.0);
    result = pow(result, 1.0 / gamma);
    result = mix(outBlack, outWhite, result);
    return result;
}

void main() {
    vec4 texColor = texture(u_image0, v_texCoord);
    vec3 color = texColor.rgb;

    float inBlack = u_float0 / 255.0;
    float inWhite = u_float1 / 255.0;
    float gamma = u_float2;
    float outBlack = u_float3 / 255.0;
    float outWhite = u_float4 / 255.0;

    vec3 result;

    if (u_int0 == 0) {
        result = applyLevels(color, inBlack, inWhite, gamma, outBlack, outWhite);
    }
    else if (u_int0 == 1) {
        result = color;
        result.r = applySingleChannel(color.r, inBlack, inWhite, gamma, outBlack, outWhite);
    }
    else if (u_int0 == 2) {
        result = color;
        result.g = applySingleChannel(color.g, inBlack, inWhite, gamma, outBlack, outWhite);
    }
    else if (u_int0 == 3) {
        result = color;
        result.b = applySingleChannel(color.b, inBlack, inWhite, gamma, outBlack, outWhite);
    }
    else {
        result = color;
    }

    fragColor = vec4(result, texColor.a);
}
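Not part of the commit: the levels transfer function as a small Python helper, with a worked example.

```python
def levels(v, in_black, in_white, gamma, out_black, out_white):
    """All parameters are 0-255 like the shader's sliders; v is in [0, 1]."""
    ib, iw = in_black / 255.0, in_white / 255.0
    ob, ow = out_black / 255.0, out_white / 255.0
    t = min(max((v - ib) / max(iw - ib, 1e-4), 0.0), 1.0)
    t = t ** (1.0 / gamma)       # gamma > 1 lifts the midtones
    return ob + (ow - ob) * t    # remap to the output range

# Raising input black to 32 and dropping input white to 223 stretches
# contrast; gamma 2.2 then brightens the midtones:
print(levels(0.5, 32, 223, 2.2, 0, 255))  # ~0.73
```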
blueprints/.glsl/README.md (new file, 28 lines)
@@ -0,0 +1,28 @@
# GLSL Shader Sources

This folder contains the GLSL fragment shaders extracted from the blueprint JSON files for easier editing and version control.

## File Naming Convention

`{Blueprint_Name}_{node_id}.frag`

- **Blueprint_Name**: the JSON filename with spaces/special chars replaced by underscores
- **node_id**: the GLSLShader node ID within the subgraph

## Usage

```bash
# Extract shaders from blueprint JSONs to this folder
python update_blueprints.py extract

# Patch edited shaders back into blueprint JSONs
python update_blueprints.py patch
```

## Workflow

1. Run `extract` to pull the current shaders from the JSONs
2. Edit the `.frag` files
3. Run `patch` to update the blueprint JSONs
4. Test
5. Commit both the `.frag` files and the updated JSONs
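Not part of the commit: the naming rule demonstrated. `Sharpen.json` holding a GLSLShader node with id 23 yields `Sharpen_23.frag`, matching the files added in this compare.

```python
import re

def frag_name(blueprint_filename, node_id):
    """Apply the README's naming convention (mirrors sanitize_filename below)."""
    stem = blueprint_filename.rsplit(".json", 1)[0]
    safe = re.sub(r"[^\w\-]", "_", stem)
    return f"{safe}_{node_id}.frag"

print(frag_name("Sharpen.json", 23))                # Sharpen_23.frag
print(frag_name("Edge-Preserving Blur.json", 128))  # Edge-Preserving_Blur_128.frag
```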
blueprints/.glsl/Sharpen_23.frag (new file, 28 lines)
@@ -0,0 +1,28 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform float u_float0; // strength [0.0 – 2.0] typical: 0.3–1.0

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;

void main() {
    vec2 texel = 1.0 / u_resolution;

    // Sample center and neighbors
    vec4 center = texture(u_image0, v_texCoord);
    vec4 top    = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));
    vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0,  texel.y));
    vec4 left   = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));
    vec4 right  = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));

    // Edge enhancement (Laplacian)
    vec4 edges = center * 4.0 - top - bottom - left - right;

    // Add edges back scaled by strength
    vec4 sharpened = center + edges * u_float0;

    fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);
}
blueprints/.glsl/Unsharp_Mask_26.frag (new file, 61 lines)
@@ -0,0 +1,61 @@
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform float u_float0; // amount [0.0 - 3.0] typical: 0.5-1.5
uniform float u_float1; // radius [0.5 - 10.0] blur radius in pixels
uniform float u_float2; // threshold [0.0 - 0.1] min difference to sharpen

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;

float gaussian(float x, float sigma) {
    return exp(-(x * x) / (2.0 * sigma * sigma));
}

float getLuminance(vec3 color) {
    return dot(color, vec3(0.2126, 0.7152, 0.0722));
}

void main() {
    vec2 texel = 1.0 / u_resolution;
    float radius = max(u_float1, 0.5);
    float amount = u_float0;
    float threshold = u_float2;

    vec4 original = texture(u_image0, v_texCoord);

    // Gaussian blur for the "unsharp" mask
    int samples = int(ceil(radius));
    float sigma = radius / 2.0;

    vec4 blurred = vec4(0.0);
    float totalWeight = 0.0;

    for (int x = -samples; x <= samples; x++) {
        for (int y = -samples; y <= samples; y++) {
            vec2 offset = vec2(float(x), float(y)) * texel;
            vec4 sample_color = texture(u_image0, v_texCoord + offset);

            float dist = length(vec2(float(x), float(y)));
            float weight = gaussian(dist, sigma);
            blurred += sample_color * weight;
            totalWeight += weight;
        }
    }
    blurred /= totalWeight;

    // Unsharp mask = original - blurred
    vec3 mask = original.rgb - blurred.rgb;

    // Luminance-based threshold with smooth falloff
    float lumaDelta = abs(getLuminance(original.rgb) - getLuminance(blurred.rgb));
    float thresholdScale = smoothstep(0.0, threshold, lumaDelta);
    mask *= thresholdScale;

    // Sharpen: original + mask * amount
    vec3 sharpened = original.rgb + mask * amount;

    fragColor0 = vec4(clamp(sharpened, 0.0, 1.0), original.a);
}
blueprints/.glsl/update_blueprints.py (new file, 159 lines)
@@ -0,0 +1,159 @@
#!/usr/bin/env python3
"""
Shader Blueprint Updater

Syncs GLSL shader files between this folder and blueprint JSON files.

File naming convention:
    {Blueprint Name}_{node_id}.frag

Usage:
    python update_blueprints.py extract   # Extract shaders from JSONs to here
    python update_blueprints.py patch     # Patch shaders back into JSONs
    python update_blueprints.py           # Same as patch (default)
"""

import json
import logging
import sys
import re
from pathlib import Path

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)

GLSL_DIR = Path(__file__).parent
BLUEPRINTS_DIR = GLSL_DIR.parent


def get_blueprint_files():
    """Get all blueprint JSON files."""
    return sorted(BLUEPRINTS_DIR.glob("*.json"))


def sanitize_filename(name):
    """Convert blueprint name to safe filename."""
    return re.sub(r'[^\w\-]', '_', name)


def extract_shaders():
    """Extract all shaders from blueprint JSONs to this folder."""
    extracted = 0
    for json_path in get_blueprint_files():
        blueprint_name = json_path.stem

        try:
            with open(json_path, 'r') as f:
                data = json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            logger.warning("Skipping %s: %s", json_path.name, e)
            continue

        # Find GLSLShader nodes in subgraphs
        for subgraph in data.get('definitions', {}).get('subgraphs', []):
            for node in subgraph.get('nodes', []):
                if node.get('type') == 'GLSLShader':
                    node_id = node.get('id')
                    widgets = node.get('widgets_values', [])

                    # Find shader code (first string that looks like GLSL)
                    for widget in widgets:
                        if isinstance(widget, str) and widget.startswith('#version'):
                            safe_name = sanitize_filename(blueprint_name)
                            frag_name = f"{safe_name}_{node_id}.frag"
                            frag_path = GLSL_DIR / frag_name

                            with open(frag_path, 'w') as f:
                                f.write(widget)

                            logger.info("  Extracted: %s", frag_name)
                            extracted += 1
                            break

    logger.info("\nExtracted %d shader(s)", extracted)


def patch_shaders():
    """Patch shaders from this folder back into blueprint JSONs."""
    # Build lookup: blueprint_name -> [(node_id, shader_code), ...]
    shader_updates = {}

    for frag_path in sorted(GLSL_DIR.glob("*.frag")):
        # Parse filename: {blueprint_name}_{node_id}.frag
        parts = frag_path.stem.rsplit('_', 1)
        if len(parts) != 2:
            logger.warning("Skipping %s: invalid filename format", frag_path.name)
            continue

        blueprint_name, node_id_str = parts

        try:
            node_id = int(node_id_str)
        except ValueError:
            logger.warning("Skipping %s: invalid node_id", frag_path.name)
            continue

        with open(frag_path, 'r') as f:
            shader_code = f.read()

        if blueprint_name not in shader_updates:
            shader_updates[blueprint_name] = []
        shader_updates[blueprint_name].append((node_id, shader_code))

    # Apply updates to JSON files
    patched = 0
    for json_path in get_blueprint_files():
        blueprint_name = sanitize_filename(json_path.stem)

        if blueprint_name not in shader_updates:
            continue

        try:
            with open(json_path, 'r') as f:
                data = json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            logger.error("Error reading %s: %s", json_path.name, e)
            continue

        modified = False
        for node_id, shader_code in shader_updates[blueprint_name]:
            # Find the node and update it
            for subgraph in data.get('definitions', {}).get('subgraphs', []):
                for node in subgraph.get('nodes', []):
                    if node.get('id') == node_id and node.get('type') == 'GLSLShader':
                        widgets = node.get('widgets_values', [])
                        if len(widgets) > 0 and widgets[0] != shader_code:
                            widgets[0] = shader_code
                            modified = True
                            logger.info("  Patched: %s (node %d)", json_path.name, node_id)
                            patched += 1

        if modified:
            with open(json_path, 'w') as f:
                json.dump(data, f)

    if patched == 0:
        logger.info("No changes to apply.")
    else:
        logger.info("\nPatched %d shader(s)", patched)


def main():
    if len(sys.argv) < 2:
        command = "patch"
    else:
        command = sys.argv[1].lower()

    if command == "extract":
        logger.info("Extracting shaders from blueprints...")
        extract_shaders()
    elif command in ("patch", "update", "apply"):
        logger.info("Patching shaders into blueprints...")
        patch_shaders()
    else:
        logger.info(__doc__)
        sys.exit(1)


if __name__ == "__main__":
    main()
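Not part of the commit: a quick check of the filename round-trip the script relies on. Splitting on the last underscore keeps underscores inside the blueprint name intact.

```python
def parse_frag_stem(stem):
    """Split '{blueprint_name}_{node_id}', equivalent to the script's rsplit('_', 1)."""
    name, _, node_id = stem.rpartition("_")
    return name, int(node_id)

# Underscores produced by sanitize_filename stay on the name side:
print(parse_frag_stem("Edge-Preserving_Blur_128"))  # ('Edge-Preserving_Blur', 128)
print(parse_frag_stem("Hue_and_Saturation_1"))      # ('Hue_and_Saturation', 1)
```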
1
blueprints/Brightness and Contrast.json
Normal file
1
blueprints/Brightness and Contrast.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Chromatic Aberration.json
Normal file
1
blueprints/Chromatic Aberration.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Color Adjustment.json
Normal file
1
blueprints/Color Adjustment.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Edge-Preserving Blur.json
Normal file
1
blueprints/Edge-Preserving Blur.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Film Grain.json
Normal file
1
blueprints/Film Grain.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Glow.json
Normal file
1
blueprints/Glow.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Hue and Saturation.json
Normal file
1
blueprints/Hue and Saturation.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Image Blur.json
Normal file
1
blueprints/Image Blur.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Image Channels.json
Normal file
1
blueprints/Image Channels.json
Normal file
@@ -0,0 +1 @@
|
||||
{"revision": 0, "last_node_id": 29, "last_link_id": 0, "nodes": [{"id": 29, "type": "4c9d6ea4-b912-40e5-8766-6793a9758c53", "pos": [1970, -230], "size": [180, 86], "flags": {}, "order": 5, "mode": 0, "inputs": [{"label": "image", "localized_name": "images.image0", "name": "images.image0", "type": "IMAGE", "link": null}], "outputs": [{"label": "R", "localized_name": "IMAGE0", "name": "IMAGE0", "type": "IMAGE", "links": []}, {"label": "G", "localized_name": "IMAGE1", "name": "IMAGE1", "type": "IMAGE", "links": []}, {"label": "B", "localized_name": "IMAGE2", "name": "IMAGE2", "type": "IMAGE", "links": []}, {"label": "A", "localized_name": "IMAGE3", "name": "IMAGE3", "type": "IMAGE", "links": []}], "title": "Image Channels", "properties": {"proxyWidgets": []}, "widgets_values": []}], "links": [], "version": 0.4, "definitions": {"subgraphs": [{"id": "4c9d6ea4-b912-40e5-8766-6793a9758c53", "version": 1, "state": {"lastGroupId": 0, "lastNodeId": 28, "lastLinkId": 39, "lastRerouteId": 0}, "revision": 0, "config": {}, "name": "Image Channels", "inputNode": {"id": -10, "bounding": [1820, -185, 120, 60]}, "outputNode": {"id": -20, "bounding": [2460, -215, 120, 120]}, "inputs": [{"id": "3522932b-2d86-4a1f-a02a-cb29f3a9d7fe", "name": "images.image0", "type": "IMAGE", "linkIds": [39], "localized_name": "images.image0", "label": "image", "pos": [1920, -165]}], "outputs": [{"id": "605cb9c3-b065-4d9b-81d2-3ec331889b2b", "name": "IMAGE0", "type": "IMAGE", "linkIds": [26], "localized_name": "IMAGE0", "label": "R", "pos": [2480, -195]}, {"id": "fb44a77e-0522-43e9-9527-82e7465b3596", "name": "IMAGE1", "type": "IMAGE", "linkIds": [27], "localized_name": "IMAGE1", "label": "G", "pos": [2480, -175]}, {"id": "81460ee6-0131-402a-874f-6bf3001fc4ff", "name": "IMAGE2", "type": "IMAGE", "linkIds": [28], "localized_name": "IMAGE2", "label": "B", "pos": [2480, -155]}, {"id": "ae690246-80d4-4951-b1d9-9306d8a77417", "name": "IMAGE3", "type": "IMAGE", "linkIds": [29], "localized_name": "IMAGE3", "label": "A", "pos": [2480, -135]}], "widgets": [], "nodes": [{"id": 23, "type": "GLSLShader", "pos": [2000, -330], "size": [400, 172], "flags": {}, "order": 0, "mode": 0, "inputs": [{"label": "image", "localized_name": "images.image0", "name": "images.image0", "type": "IMAGE", "link": 39}, {"localized_name": "fragment_shader", "name": "fragment_shader", "type": "STRING", "widget": {"name": "fragment_shader"}, "link": null}, {"localized_name": "size_mode", "name": "size_mode", "type": "COMFY_DYNAMICCOMBO_V3", "widget": {"name": "size_mode"}, "link": null}, {"label": "image1", "localized_name": "images.image1", "name": "images.image1", "shape": 7, "type": "IMAGE", "link": null}], "outputs": [{"label": "R", "localized_name": "IMAGE0", "name": "IMAGE0", "type": "IMAGE", "links": [26]}, {"label": "G", "localized_name": "IMAGE1", "name": "IMAGE1", "type": "IMAGE", "links": [27]}, {"label": "B", "localized_name": "IMAGE2", "name": "IMAGE2", "type": "IMAGE", "links": [28]}, {"label": "A", "localized_name": "IMAGE3", "name": "IMAGE3", "type": "IMAGE", "links": [29]}], "properties": {"Node name for S&R": "GLSLShader"}, "widgets_values": ["#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\nlayout(location = 1) out vec4 fragColor1;\nlayout(location = 2) out vec4 fragColor2;\nlayout(location = 3) out vec4 fragColor3;\n\nvoid main() {\n vec4 color = texture(u_image0, v_texCoord);\n // Output each channel as grayscale to separate render targets\n fragColor0 = 
vec4(vec3(color.r), 1.0); // Red channel\n fragColor1 = vec4(vec3(color.g), 1.0); // Green channel\n fragColor2 = vec4(vec3(color.b), 1.0); // Blue channel\n fragColor3 = vec4(vec3(color.a), 1.0); // Alpha channel\n}\n", "from_input"]}], "groups": [], "links": [{"id": 39, "origin_id": -10, "origin_slot": 0, "target_id": 23, "target_slot": 0, "type": "IMAGE"}, {"id": 26, "origin_id": 23, "origin_slot": 0, "target_id": -20, "target_slot": 0, "type": "IMAGE"}, {"id": 27, "origin_id": 23, "origin_slot": 1, "target_id": -20, "target_slot": 1, "type": "IMAGE"}, {"id": 28, "origin_id": 23, "origin_slot": 2, "target_id": -20, "target_slot": 2, "type": "IMAGE"}, {"id": 29, "origin_id": 23, "origin_slot": 3, "target_id": -20, "target_slot": 3, "type": "IMAGE"}], "extra": {"workflowRendererVersion": "LG"}}]}}
|
||||
1
blueprints/Image Levels.json
Normal file
1
blueprints/Image Levels.json
Normal file
File diff suppressed because one or more lines are too long
1
blueprints/Sharpen.json
Normal file
1
blueprints/Sharpen.json
Normal file
@@ -0,0 +1 @@
|
{"revision":0,"last_node_id":25,"last_link_id":0,"nodes":[{"id":25,"type":"621ba4e2-22a8-482d-a369-023753198b7b","pos":[4610,-790],"size":[230,58],"flags":{},"order":4,"mode":0,"inputs":[{"label":"image","localized_name":"images.image0","name":"images.image0","type":"IMAGE","link":null}],"outputs":[{"label":"IMAGE","localized_name":"IMAGE0","name":"IMAGE0","type":"IMAGE","links":[]}],"title":"Sharpen","properties":{"proxyWidgets":[["24","value"]]},"widgets_values":[]}],"links":[],"version":0.4,"definitions":{"subgraphs":[{"id":"621ba4e2-22a8-482d-a369-023753198b7b","version":1,"state":{"lastGroupId":0,"lastNodeId":24,"lastLinkId":36,"lastRerouteId":0},"revision":0,"config":{},"name":"Sharpen","inputNode":{"id":-10,"bounding":[4090,-825,120,60]},"outputNode":{"id":-20,"bounding":[5150,-825,120,60]},"inputs":[{"id":"37011fb7-14b7-4e0e-b1a0-6a02e8da1fd7","name":"images.image0","type":"IMAGE","linkIds":[34],"localized_name":"images.image0","label":"image","pos":[4190,-805]}],"outputs":[{"id":"e9182b3f-635c-4cd4-a152-4b4be17ae4b9","name":"IMAGE0","type":"IMAGE","linkIds":[35],"localized_name":"IMAGE0","label":"IMAGE","pos":[5170,-805]}],"widgets":[],"nodes":[{"id":24,"type":"PrimitiveFloat","pos":[4280,-1240],"size":[270,58],"flags":{},"order":0,"mode":0,"inputs":[{"label":"strength","localized_name":"value","name":"value","type":"FLOAT","widget":{"name":"value"},"link":null}],"outputs":[{"localized_name":"FLOAT","name":"FLOAT","type":"FLOAT","links":[36]}],"properties":{"Node name for S&R":"PrimitiveFloat","min":0,"max":3,"precision":2,"step":0.05},"widgets_values":[0.5]},{"id":23,"type":"GLSLShader","pos":[4570,-1240],"size":[370,192],"flags":{},"order":1,"mode":0,"inputs":[{"label":"image0","localized_name":"images.image0","name":"images.image0","type":"IMAGE","link":34},{"label":"image1","localized_name":"images.image1","name":"images.image1","shape":7,"type":"IMAGE","link":null},{"label":"u_float0","localized_name":"floats.u_float0","name":"floats.u_float0","shape":7,"type":"FLOAT","link":36},{"label":"u_float1","localized_name":"floats.u_float1","name":"floats.u_float1","shape":7,"type":"FLOAT","link":null},{"label":"u_int0","localized_name":"ints.u_int0","name":"ints.u_int0","shape":7,"type":"INT","link":null},{"localized_name":"fragment_shader","name":"fragment_shader","type":"STRING","widget":{"name":"fragment_shader"},"link":null},{"localized_name":"size_mode","name":"size_mode","type":"COMFY_DYNAMICCOMBO_V3","widget":{"name":"size_mode"},"link":null}],"outputs":[{"localized_name":"IMAGE0","name":"IMAGE0","type":"IMAGE","links":[35]},{"localized_name":"IMAGE1","name":"IMAGE1","type":"IMAGE","links":null},{"localized_name":"IMAGE2","name":"IMAGE2","type":"IMAGE","links":null},{"localized_name":"IMAGE3","name":"IMAGE3","type":"IMAGE","links":null}],"properties":{"Node name for S&R":"GLSLShader"},"widgets_values":["#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // strength [0.0 – 2.0] typical: 0.3–1.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / u_resolution;\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}","from_input"]}],"groups":[],"links":[{"id":36,"origin_id":24,"origin_slot":0,"target_id":23,"target_slot":2,"type":"FLOAT"},{"id":34,"origin_id":-10,"origin_slot":0,"target_id":23,"target_slot":0,"type":"IMAGE"},{"id":35,"origin_id":23,"origin_slot":0,"target_id":-20,"target_slot":0,"type":"IMAGE"}],"extra":{"workflowRendererVersion":"LG"}}]}}
blueprints/Unsharp Mask.json (new file; diff suppressed because one or more lines are too long)
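Reviewer note: the Sharpen blueprint above embeds a GLSL fragment shader that does Laplacian edge enhancement (edges = 4*center - top - bottom - left - right, then center + strength*edges, clamped). A rough PyTorch equivalent of that per-pixel math, as a sketch (function name is ours, not part of the diff):

    import torch
    import torch.nn.functional as F

    def laplacian_sharpen(img: torch.Tensor, strength: float = 0.5) -> torch.Tensor:
        # img: [B, C, H, W] in [0, 1]; replicate padding approximates the
        # shader's clamp-to-edge sampling.
        c = img.shape[1]
        kernel = torch.tensor([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]],
                              dtype=img.dtype, device=img.device).repeat(c, 1, 1, 1)
        edges = F.conv2d(F.pad(img, (1, 1, 1, 1), mode="replicate"), kernel, groups=c)
        # Shader clamps rgb and preserves alpha; alpha handling omitted here.
        return (img + strength * edges).clamp(0.0, 1.0)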
@@ -102,7 +102,19 @@ class VideoConv3d(nn.Module):
         return self.conv(x)
 
 def interpolate_up(x, scale_factor):
-    return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
+    try:
+        return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
+    except: #operation not implemented for bf16
+        orig_shape = list(x.shape)
+        out_shape = orig_shape[:2]
+        for i in range(len(orig_shape) - 2):
+            out_shape.append(round(orig_shape[i + 2] * scale_factor[i]))
+        out = torch.empty(out_shape, dtype=x.dtype, layout=x.layout, device=x.device)
+        split = 8
+        l = out.shape[1] // split
+        for i in range(0, out.shape[1], l):
+            out[:,i:i+l] = torch.nn.functional.interpolate(x[:,i:i+l].to(torch.float32), scale_factor=scale_factor, mode="nearest").to(x.dtype)
+        return out
 
 class Upsample(nn.Module):
     def __init__(self, in_channels, with_conv, conv_op=ops.Conv2d, scale_factor=2.0):
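Reviewer note: the fallback exists because nearest-neighbor interpolate is not implemented for some dtypes (the comment cites bf16) on some backends; the hunk upsamples channel chunks in float32 and casts back, bounding peak memory. The same idea in a standalone sketch (illustrative, not the repository function):

    import torch
    import torch.nn.functional as F

    def nearest_up_safe(x: torch.Tensor, scale_factor) -> torch.Tensor:
        try:
            return F.interpolate(x, scale_factor=scale_factor, mode="nearest")
        except Exception:
            # Fall back to float32 in channel chunks to bound peak memory.
            chunk = max(1, x.shape[1] // 8)
            parts = [
                F.interpolate(x[:, i:i + chunk].float(), scale_factor=scale_factor,
                              mode="nearest").to(x.dtype)
                for i in range(0, x.shape[1], chunk)
            ]
            return torch.cat(parts, dim=1)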
@@ -374,31 +374,6 @@ def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Tensor:
 
     return padded_tensor
 
-def calculate_shape(patches, weight, key, original_weights=None):
-    current_shape = weight.shape
-
-    for p in patches:
-        v = p[1]
-        offset = p[3]
-
-        # Offsets restore the old shape; lists force a diff without metadata
-        if offset is not None or isinstance(v, list):
-            continue
-
-        if isinstance(v, weight_adapter.WeightAdapterBase):
-            adapter_shape = v.calculate_shape(key)
-            if adapter_shape is not None:
-                current_shape = adapter_shape
-            continue
-
-        # Standard diff logic with padding
-        if len(v) == 2:
-            patch_type, patch_data = v[0], v[1]
-            if patch_type == "diff" and len(patch_data) > 1 and patch_data[1]['pad_weight']:
-                current_shape = patch_data[0].shape
-
-    return current_shape
-
 def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, original_weights=None):
     for p in patches:
         strength = p[0]
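Reviewer note: on the side of the compare that still has it, calculate_shape predicts the post-patch tensor shape without materializing any weights, so callers can detect shape-changing patches cheaply (ModelPatcherDynamic uses exactly this below to decide on force loading). A minimal sketch, assuming the comfy.lora import path and a hypothetical key:

    import torch
    import comfy.lora

    # Patch layout as consumed above: p[1] is the value, p[3] the offset; a
    # "diff" value is ("diff", (tensor, {"pad_weight": True})) when it resizes.
    weight = torch.zeros(4, 4)
    patches = [(1.0, ("diff", (torch.zeros(6, 4), {"pad_weight": True})), None, None)]

    new_shape = comfy.lora.calculate_shape(patches, weight, "some.layer.weight")
    needs_resize = tuple(new_shape) != tuple(weight.shape)  # True here: (6, 4) vs (4, 4)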
@@ -1514,10 +1514,8 @@ class ModelPatcherDynamic(ModelPatcher):
 
             weight, _, _ = get_key_weight(self.model, key)
             if weight is None:
-                return (False, 0)
+                return 0
             if key in self.patches:
-                if comfy.lora.calculate_shape(self.patches[key], weight, key) != weight.shape:
-                    return (True, 0)
                 setattr(m, param_key + "_lowvram_function", LowVramPatch(key, self.patches))
                 num_patches += 1
             else:
@@ -1531,13 +1529,7 @@ class ModelPatcherDynamic(ModelPatcher):
             model_dtype = getattr(m, param_key + "_comfy_model_dtype", None) or weight.dtype
             weight._model_dtype = model_dtype
             geometry = comfy.memory_management.TensorGeometry(shape=weight.shape, dtype=model_dtype)
-            return (False, comfy.memory_management.vram_aligned_size(geometry))
-
-        def force_load_param(self, param_key, device_to):
-            key = key_param_name_to_key(n, param_key)
-            if key in self.backup:
-                comfy.utils.set_attr_param(self.model, key, self.backup[key].weight)
-            self.patch_weight_to_device(key, device_to=device_to)
+            return comfy.memory_management.vram_aligned_size(geometry)
 
         if hasattr(m, "comfy_cast_weights"):
             m.comfy_cast_weights = True
@@ -1545,19 +1537,13 @@ class ModelPatcherDynamic(ModelPatcher):
             m.seed_key = n
             set_dirty(m, dirty)
 
-            force_load, v_weight_size = setup_param(self, m, n, "weight")
-            force_load_bias, v_weight_bias = setup_param(self, m, n, "bias")
-            force_load = force_load or force_load_bias
-            v_weight_size += v_weight_bias
+            v_weight_size = 0
+            v_weight_size += setup_param(self, m, n, "weight")
+            v_weight_size += setup_param(self, m, n, "bias")
 
-            if force_load:
-                logging.info(f"Module {n} has resizing Lora - force loading")
-                force_load_param(self, "weight", device_to)
-                force_load_param(self, "bias", device_to)
-            else:
-                if vbar is not None and not hasattr(m, "_v"):
-                    m._v = vbar.alloc(v_weight_size)
-                    allocated_size += v_weight_size
+            if vbar is not None and not hasattr(m, "_v"):
+                m._v = vbar.alloc(v_weight_size)
+                allocated_size += v_weight_size
 
         else:
             for param in params:
@@ -1620,11 +1606,6 @@ class ModelPatcherDynamic(ModelPatcher):
         for m in self.model.modules():
             move_weight_functions(m, device_to)
 
-        keys = list(self.backup.keys())
-        for k in keys:
-            bk = self.backup[k]
-            comfy.utils.set_attr_param(self.model, k, bk.weight)
-
     def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):
         assert not force_patch_weights #See above
         with self.use_ejected(skip_and_inject_on_exit_only=True):
@@ -49,12 +49,6 @@ class WeightAdapterBase:
         """
         raise NotImplementedError
 
-    def calculate_shape(
-        self,
-        key
-    ):
-        return None
-
     def calculate_weight(
         self,
         weight,
@@ -214,13 +214,6 @@ class LoRAAdapter(WeightAdapterBase):
         else:
             return None
 
-    def calculate_shape(
-        self,
-        key
-    ):
-        reshape = self.weights[5]
-        return tuple(reshape) if reshape is not None else None
-
     def calculate_weight(
         self,
         weight,
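Reviewer note: LoRAAdapter's override simply reports the reshape stored at index 5 of its weights tuple; any adapter that changes the target shape can hook in the same way. A hedged sketch of a custom adapter using this hook (hypothetical subclass; import path assumed):

    from comfy.weight_adapter import WeightAdapterBase  # import path assumed

    class PadOutAdapter(WeightAdapterBase):
        """Toy adapter: reports that it grows the target weight to target_shape."""
        def __init__(self, target_shape):
            self.target_shape = tuple(target_shape)

        def calculate_shape(self, key):
            # A non-None return tells callers (e.g. comfy.lora.calculate_shape)
            # that applying this adapter changes the weight's shape.
            return self.target_shape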
@@ -14,7 +14,6 @@ SERVER_FEATURE_FLAGS: dict[str, Any] = {
     "supports_preview_metadata": True,
     "max_upload_size": args.max_upload_size * 1024 * 1024,  # Convert MB to bytes
     "extension": {"manager": {"supports_v4": True}},
-    "node_replacements": True,
 }
 
 
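Reviewer note: these flags are advertised to clients at connection time, so a frontend can gate its behavior on them. A hedged sketch of such a check (the accessor is hypothetical; only the flag name comes from the dict above):

    flags = get_server_feature_flags()  # hypothetical helper returning the dict above
    if flags.get("node_replacements"):
        ...  # server can rewrite deprecated nodes in submitted prompts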
@@ -21,17 +21,6 @@ class ComfyAPI_latest(ComfyAPIBase):
     VERSION = "latest"
     STABLE = False
 
-    def __init__(self):
-        super().__init__()
-        self.node_replacement = self.NodeReplacement()
-        self.execution = self.Execution()
-
-    class NodeReplacement(ProxiedSingleton):
-        async def register(self, node_replace: io.NodeReplace) -> None:
-            """Register a node replacement mapping."""
-            from server import PromptServer
-            PromptServer.instance.node_replace_manager.register(node_replace)
-
     class Execution(ProxiedSingleton):
         async def set_progress(
             self,
@@ -84,6 +73,8 @@ class ComfyAPI_latest(ComfyAPIBase):
                 image=to_display,
             )
 
+    execution: Execution
+
 class ComfyExtension(ABC):
     async def on_load(self) -> None:
         """
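Reviewer note: on the branch that has NodeReplacement, extensions reach the manager through the proxied API object. A hedged sketch of registering a replacement from an extension's on_load (import paths and node class names are assumptions; only register() and io.NodeReplace come from the hunks):

    from comfy_api.latest import ComfyAPI_latest, io  # import paths assumed

    api = ComfyAPI_latest()

    async def on_load():
        # Node class names here are hypothetical.
        await api.node_replacement.register(
            io.NodeReplace(new_node_id="ImageSharpenV2", old_node_id="ImageSharpen")
        )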
@@ -1300,7 +1300,6 @@ class NodeInfoV1:
     name: str=None
     display_name: str=None
     description: str=None
-    short_description: str=None
     python_module: Any=None
     category: str=None
     output_node: bool=None
@@ -1391,8 +1390,6 @@ class Schema:
     hidden: list[Hidden] = field(default_factory=list)
     description: str=""
     """Node description, shown as a tooltip when hovering over the node."""
-    short_description: str=""
-    """Short node description, shown in the node list/search."""
     search_aliases: list[str] = field(default_factory=list)
     """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
     is_input_list: bool = False
@@ -1531,7 +1528,6 @@ class Schema:
             display_name=self.display_name,
             category=self.category,
             description=self.description,
-            short_description=self.short_description,
             output_node=self.is_output_node,
             deprecated=self.is_deprecated,
             experimental=self.is_experimental,
@@ -1775,14 +1771,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
             cls.GET_SCHEMA()
         return cls._DESCRIPTION
 
-    _SHORT_DESCRIPTION = None
-    @final
-    @classproperty
-    def SHORT_DESCRIPTION(cls):  # noqa
-        if cls._SHORT_DESCRIPTION is None:
-            cls.GET_SCHEMA()
-        return cls._SHORT_DESCRIPTION
-
     _CATEGORY = None
     @final
     @classproperty
@@ -1911,8 +1899,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
         schema.validate()
         if cls._DESCRIPTION is None:
             cls._DESCRIPTION = schema.description
-        if cls._SHORT_DESCRIPTION is None:
-            cls._SHORT_DESCRIPTION = schema.short_description
         if cls._CATEGORY is None:
             cls._CATEGORY = schema.category
         if cls._EXPERIMENTAL is None:
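Reviewer note: the two hunks above are one pattern, lazy class-level caching via classproperty: the first read of any schema-derived attribute triggers GET_SCHEMA(), which fills the underscore caches; later reads skip the schema build. A reduced sketch (classproperty as defined in this module; field values assumed):

    class MyNode(_ComfyNodeBaseInternal):  # illustrative subclass
        @classmethod
        def define_schema(cls):
            return Schema(node_id="MyNode", category="examples")  # minimal, assumed fields

    # First access builds the schema and fills cls._CATEGORY; later accesses
    # return the cached value without rebuilding.
    print(MyNode.CATEGORY)  # -> "examples"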
@@ -2044,68 +2030,6 @@ class _UIOutput(ABC):
         ...
 
 
-class InputMapOldId(TypedDict):
-    """Map an old node input to a new node input by ID."""
-    new_id: str
-    old_id: str
-
-class InputMapSetValue(TypedDict):
-    """Set a specific value for a new node input."""
-    new_id: str
-    set_value: Any
-
-InputMap = InputMapOldId | InputMapSetValue
-"""
-Input mapping for node replacement. Type is inferred by dictionary keys:
-- {"new_id": str, "old_id": str} - maps old input to new input
-- {"new_id": str, "set_value": Any} - sets a specific value for new input
-"""
-
-class OutputMap(TypedDict):
-    """Map outputs of node replacement via indexes."""
-    new_idx: int
-    old_idx: int
-
-class NodeReplace:
-    """
-    Defines a possible node replacement, mapping inputs and outputs of the old node to the new node.
-
-    Also supports assigning specific values to the input widgets of the new node.
-
-    Args:
-        new_node_id: The class name of the new replacement node.
-        old_node_id: The class name of the deprecated node.
-        old_widget_ids: Ordered list of input IDs for widgets that may not have an input slot
-            connected. The workflow JSON stores widget values by their relative position index,
-            not by ID. This list maps those positional indexes to input IDs, enabling the
-            replacement system to correctly identify widget values during node migration.
-        input_mapping: List of input mappings from old node to new node.
-        output_mapping: List of output mappings from old node to new node.
-    """
-    def __init__(self,
-                 new_node_id: str,
-                 old_node_id: str,
-                 old_widget_ids: list[str] | None=None,
-                 input_mapping: list[InputMap] | None=None,
-                 output_mapping: list[OutputMap] | None=None,
-                 ):
-        self.new_node_id = new_node_id
-        self.old_node_id = old_node_id
-        self.old_widget_ids = old_widget_ids
-        self.input_mapping = input_mapping
-        self.output_mapping = output_mapping
-
-    def as_dict(self):
-        """Create serializable representation of the node replacement."""
-        return {
-            "new_node_id": self.new_node_id,
-            "old_node_id": self.old_node_id,
-            "old_widget_ids": self.old_widget_ids,
-            "input_mapping": list(self.input_mapping) if self.input_mapping else None,
-            "output_mapping": list(self.output_mapping) if self.output_mapping else None,
-        }
-
-
 __all__ = [
     "FolderType",
     "UploadType",
@@ -2197,5 +2121,4 @@ __all__ = [
     "ImageCompare",
    "PriceBadgeDepends",
     "PriceBadge",
-    "NodeReplace",
 ]
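Reviewer note: a full NodeReplace mapping, following the docstring above, looks roughly like the sketch below; the node names and input ids are hypothetical, only the keyword arguments and dictionary keys come from the removed code:

    replace = NodeReplace(
        new_node_id="ImageScaleV2",
        old_node_id="ImageScale",
        old_widget_ids=["upscale_method", "width", "height", "crop"],
        input_mapping=[
            {"new_id": "image", "old_id": "image"},       # reconnect by id
            {"new_id": "size.width", "old_id": "width"},  # ids may move under a group
            {"new_id": "antialias", "set_value": True},   # new input gets a constant
        ],
        output_mapping=[{"new_idx": 0, "old_idx": 0}],
    )
    payload = replace.as_dict()  # serializable form sent to the frontend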
@@ -45,55 +45,17 @@ class BriaEditImageRequest(BaseModel):
     )
 
 
-class BriaRemoveBackgroundRequest(BaseModel):
-    image: str = Field(...)
-    sync: bool = Field(False)
-    visual_input_content_moderation: bool = Field(
-        False, description="If true, returns 422 on input image moderation failure."
-    )
-    visual_output_content_moderation: bool = Field(
-        False, description="If true, returns 422 on visual output moderation failure."
-    )
-    seed: int = Field(...)
-
-
 class BriaStatusResponse(BaseModel):
     request_id: str = Field(...)
     status_url: str = Field(...)
     warning: str | None = Field(None)
 
 
-class BriaRemoveBackgroundResult(BaseModel):
-    image_url: str = Field(...)
-
-
-class BriaRemoveBackgroundResponse(BaseModel):
-    status: str = Field(...)
-    result: BriaRemoveBackgroundResult | None = Field(None)
-
-
-class BriaImageEditResult(BaseModel):
+class BriaResult(BaseModel):
     structured_prompt: str = Field(...)
     image_url: str = Field(...)
 
 
-class BriaImageEditResponse(BaseModel):
+class BriaResponse(BaseModel):
     status: str = Field(...)
-    result: BriaImageEditResult | None = Field(None)
-
-
-class BriaRemoveVideoBackgroundRequest(BaseModel):
-    video: str = Field(...)
-    background_color: str = Field(default="transparent", description="Background color for the output video.")
-    output_container_and_codec: str = Field(...)
-    preserve_audio: bool = Field(True)
-    seed: int = Field(...)
-
-
-class BriaRemoveVideoBackgroundResult(BaseModel):
-    video_url: str = Field(...)
-
-
-class BriaRemoveVideoBackgroundResponse(BaseModel):
-    status: str = Field(...)
-    result: BriaRemoveVideoBackgroundResult | None = Field(None)
+    result: BriaResult | None = Field(None)
@@ -64,23 +64,3 @@ class To3DProTaskResultResponse(BaseModel):
 
 class To3DProTaskQueryRequest(BaseModel):
     JobId: str = Field(...)
-
-
-class To3DUVFileInput(BaseModel):
-    Type: str = Field(..., description="File type: GLB, OBJ, or FBX")
-    Url: str = Field(...)
-
-
-class To3DUVTaskRequest(BaseModel):
-    File: To3DUVFileInput = Field(...)
-
-
-class TextureEditImageInfo(BaseModel):
-    Url: str = Field(...)
-
-
-class TextureEditTaskRequest(BaseModel):
-    File3D: To3DUVFileInput = Field(...)
-    Image: TextureEditImageInfo | None = Field(None)
-    Prompt: str | None = Field(None)
-    EnablePBR: bool | None = Field(None)
@@ -44,7 +44,6 @@ class FluxProUltraImageNode(IO.ComfyNode):
             display_name="Flux 1.1 [pro] Ultra Image",
             category="api node/image/BFL",
             description="Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.",
-            short_description="Generate images with Flux Pro 1.1 Ultra API.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -155,17 +154,13 @@ class FluxProUltraImageNode(IO.ComfyNode):
 
 class FluxKontextProImageNode(IO.ComfyNode):
 
-    DESCRIPTION = "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio."
-    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [pro] API."
-
     @classmethod
     def define_schema(cls) -> IO.Schema:
         return IO.Schema(
             node_id=cls.NODE_ID,
             display_name=cls.DISPLAY_NAME,
             category="api node/image/BFL",
-            description=cls.DESCRIPTION,
-            short_description=cls.SHORT_DESCRIPTION,
+            description="Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -273,7 +268,6 @@ class FluxKontextProImageNode(IO.ComfyNode):
 class FluxKontextMaxImageNode(FluxKontextProImageNode):
 
     DESCRIPTION = "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio."
-    SHORT_DESCRIPTION = "Edit images with Flux.1 Kontext [max] API."
     BFL_PATH = "/proxy/bfl/flux-kontext-max/generate"
     NODE_ID = "FluxKontextMaxImageNode"
     DISPLAY_NAME = "Flux.1 Kontext [max] Image"
@@ -288,7 +282,6 @@ class FluxProExpandNode(IO.ComfyNode):
             display_name="Flux.1 Expand Image",
             category="api node/image/BFL",
             description="Outpaints image based on prompt.",
-            short_description=None,
             inputs=[
                 IO.Image.Input("image"),
                 IO.String.Input(
@@ -425,7 +418,6 @@ class FluxProFillNode(IO.ComfyNode):
             display_name="Flux.1 Fill Image",
             category="api node/image/BFL",
             description="Inpaints image based on mask and prompt.",
-            short_description=None,
             inputs=[
                 IO.Image.Input("image"),
                 IO.Mask.Input("mask"),
@@ -551,7 +543,6 @@ class Flux2ProImageNode(IO.ComfyNode):
             display_name=cls.DISPLAY_NAME,
             category="api node/image/BFL",
             description="Generates images synchronously based on prompt and resolution.",
-            short_description=None,
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -3,11 +3,7 @@ from typing_extensions import override
 from comfy_api.latest import IO, ComfyExtension, Input
 from comfy_api_nodes.apis.bria import (
     BriaEditImageRequest,
-    BriaRemoveBackgroundRequest,
-    BriaRemoveBackgroundResponse,
-    BriaRemoveVideoBackgroundRequest,
-    BriaRemoveVideoBackgroundResponse,
-    BriaImageEditResponse,
+    BriaResponse,
     BriaStatusResponse,
     InputModerationSettings,
 )
@@ -15,12 +11,10 @@ from comfy_api_nodes.util import (
     ApiEndpoint,
     convert_mask_to_image,
     download_url_to_image_tensor,
     download_url_to_video_output,
     get_number_of_images,
     poll_op,
     sync_op,
-    upload_image_to_comfyapi,
-    upload_video_to_comfyapi,
-    validate_video_duration,
+    upload_images_to_comfyapi,
 )
 
@@ -33,7 +27,6 @@ class BriaImageEditNode(IO.ComfyNode):
             display_name="Bria FIBO Image Edit",
             category="api node/image/Bria",
             description="Edit images using Bria latest model",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["FIBO"]),
                 IO.Image.Input("image"),
@@ -80,15 +73,21 @@ class BriaImageEditNode(IO.ComfyNode):
                 IO.DynamicCombo.Input(
                     "moderation",
                     options=[
-                        IO.DynamicCombo.Option("false", []),
                         IO.DynamicCombo.Option(
                             "true",
                             [
-                                IO.Boolean.Input("prompt_content_moderation", default=False),
-                                IO.Boolean.Input("visual_input_moderation", default=False),
-                                IO.Boolean.Input("visual_output_moderation", default=True),
+                                IO.Boolean.Input(
+                                    "prompt_content_moderation", default=False
+                                ),
+                                IO.Boolean.Input(
+                                    "visual_input_moderation", default=False
+                                ),
+                                IO.Boolean.Input(
+                                    "visual_output_moderation", default=True
+                                ),
                             ],
                         ),
+                        IO.DynamicCombo.Option("false", []),
                     ],
                     tooltip="Moderation settings",
                 ),
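Reviewer note: a hedged sketch of what the dynamic combo delivers to execute() in the hunk below; picking "true" surfaces the nested booleans, picking "false" contributes none, which is why the code reads them with .get(..., False). The exact envelope may differ:

    moderation = {
        "prompt_content_moderation": False,
        "visual_input_moderation": False,
        "visual_output_moderation": True,
    }  # selection == "true"
    moderation = {}  # selection == "false" (illustrative)
    prompt_mod = moderation.get("prompt_content_moderation", False)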
@@ -128,26 +127,50 @@ class BriaImageEditNode(IO.ComfyNode):
         mask: Input.Image | None = None,
     ) -> IO.NodeOutput:
         if not prompt and not structured_prompt:
-            raise ValueError("One of prompt or structured_prompt is required to be non-empty.")
+            raise ValueError(
+                "One of prompt or structured_prompt is required to be non-empty."
+            )
         if get_number_of_images(image) != 1:
             raise ValueError("Exactly one input image is required.")
         mask_url = None
         if mask is not None:
-            mask_url = await upload_image_to_comfyapi(cls, convert_mask_to_image(mask), wait_label="Uploading mask")
+            mask_url = (
+                await upload_images_to_comfyapi(
+                    cls,
+                    convert_mask_to_image(mask),
+                    max_images=1,
+                    mime_type="image/png",
+                    wait_label="Uploading mask",
+                )
+            )[0]
         response = await sync_op(
             cls,
             ApiEndpoint(path="proxy/bria/v2/image/edit", method="POST"),
             data=BriaEditImageRequest(
                 instruction=prompt if prompt else None,
                 structured_instruction=structured_prompt if structured_prompt else None,
-                images=[await upload_image_to_comfyapi(cls, image, wait_label="Uploading image")],
+                images=await upload_images_to_comfyapi(
+                    cls,
+                    image,
+                    max_images=1,
+                    mime_type="image/png",
+                    wait_label="Uploading image",
+                ),
                 mask=mask_url,
                 negative_prompt=negative_prompt if negative_prompt else None,
                 guidance_scale=guidance_scale,
                 seed=seed,
                 model_version=model,
                 steps_num=steps,
-                prompt_content_moderation=moderation.get("prompt_content_moderation", False),
-                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
-                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
+                prompt_content_moderation=moderation.get(
+                    "prompt_content_moderation", False
+                ),
+                visual_input_content_moderation=moderation.get(
+                    "visual_input_moderation", False
+                ),
+                visual_output_content_moderation=moderation.get(
+                    "visual_output_moderation", False
+                ),
             ),
             response_model=BriaStatusResponse,
         )
@@ -155,7 +178,7 @@ class BriaImageEditNode(IO.ComfyNode):
             cls,
             ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
             status_extractor=lambda r: r.status,
-            response_model=BriaImageEditResponse,
+            response_model=BriaResponse,
         )
         return IO.NodeOutput(
             await download_url_to_image_tensor(response.result.image_url),
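Reviewer note: the create-then-poll pattern shared by these API nodes, reduced to a sketch (endpoint paths, helpers, and models exactly as in the diff; error handling omitted):

    status = await sync_op(  # submit the edit task
        cls, ApiEndpoint(path="proxy/bria/v2/image/edit", method="POST"),
        data=request, response_model=BriaStatusResponse,
    )
    final = await poll_op(  # poll until status_extractor reports completion
        cls, ApiEndpoint(path=f"/proxy/bria/v2/status/{status.request_id}"),
        status_extractor=lambda r: r.status, response_model=BriaResponse,
    )
    image = await download_url_to_image_tensor(final.result.image_url)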
@@ -163,167 +186,11 @@ class BriaImageEditNode(IO.ComfyNode):
         )
 
 
-class BriaRemoveImageBackground(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="BriaRemoveImageBackground",
-            display_name="Bria Remove Image Background",
-            category="api node/image/Bria",
-            description="Remove the background from an image using Bria RMBG 2.0.",
-            inputs=[
-                IO.Image.Input("image"),
-                IO.DynamicCombo.Input(
-                    "moderation",
-                    options=[
-                        IO.DynamicCombo.Option("false", []),
-                        IO.DynamicCombo.Option(
-                            "true",
-                            [
-                                IO.Boolean.Input("visual_input_moderation", default=False),
-                                IO.Boolean.Input("visual_output_moderation", default=True),
-                            ],
-                        ),
-                    ],
-                    tooltip="Moderation settings",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=2147483647,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed controls whether the node should re-run; "
-                    "results are non-deterministic regardless of seed.",
-                ),
-            ],
-            outputs=[IO.Image.Output()],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            price_badge=IO.PriceBadge(
-                expr="""{"type":"usd","usd":0.018}""",
-            ),
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        image: Input.Image,
-        moderation: dict,
-        seed: int,
-    ) -> IO.NodeOutput:
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path="/proxy/bria/v2/image/edit/remove_background", method="POST"),
-            data=BriaRemoveBackgroundRequest(
-                image=await upload_image_to_comfyapi(cls, image, wait_label="Uploading image"),
-                sync=False,
-                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
-                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
-                seed=seed,
-            ),
-            response_model=BriaStatusResponse,
-        )
-        response = await poll_op(
-            cls,
-            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
-            status_extractor=lambda r: r.status,
-            response_model=BriaRemoveBackgroundResponse,
-        )
-        return IO.NodeOutput(await download_url_to_image_tensor(response.result.image_url))
-
-
-class BriaRemoveVideoBackground(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="BriaRemoveVideoBackground",
-            display_name="Bria Remove Video Background",
-            category="api node/video/Bria",
-            description="Remove the background from a video using Bria. ",
-            inputs=[
-                IO.Video.Input("video"),
-                IO.Combo.Input(
-                    "background_color",
-                    options=[
-                        "Black",
-                        "White",
-                        "Gray",
-                        "Red",
-                        "Green",
-                        "Blue",
-                        "Yellow",
-                        "Cyan",
-                        "Magenta",
-                        "Orange",
-                    ],
-                    tooltip="Background color for the output video.",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=2147483647,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed controls whether the node should re-run; "
-                    "results are non-deterministic regardless of seed.",
-                ),
-            ],
-            outputs=[IO.Video.Output()],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            price_badge=IO.PriceBadge(
-                expr="""{"type":"usd","usd":0.14,"format":{"suffix":"/second"}}""",
-            ),
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        video: Input.Video,
-        background_color: str,
-        seed: int,
-    ) -> IO.NodeOutput:
-        validate_video_duration(video, max_duration=60.0)
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path="/proxy/bria/v2/video/edit/remove_background", method="POST"),
-            data=BriaRemoveVideoBackgroundRequest(
-                video=await upload_video_to_comfyapi(cls, video),
-                background_color=background_color,
-                output_container_and_codec="mp4_h264",
-                seed=seed,
-            ),
-            response_model=BriaStatusResponse,
-        )
-        response = await poll_op(
-            cls,
-            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
-            status_extractor=lambda r: r.status,
-            response_model=BriaRemoveVideoBackgroundResponse,
-        )
-        return IO.NodeOutput(await download_url_to_video_output(response.result.video_url))
-
-
 class BriaExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             BriaImageEditNode,
-            BriaRemoveImageBackground,
-            BriaRemoveVideoBackground,
         ]
 
@@ -60,7 +60,6 @@ class ByteDanceImageNode(IO.ComfyNode):
             display_name="ByteDance Image",
             category="api node/image/ByteDance",
             description="Generate images using ByteDance models via api based on prompt",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]),
                 IO.String.Input(
@@ -183,7 +182,6 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
             display_name="ByteDance Seedream 4.5",
             category="api node/image/ByteDance",
             description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
-            short_description="Text-to-image generation and editing up to 4K.",
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -382,7 +380,6 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
             display_name="ByteDance Text to Video",
             category="api node/video/ByteDance",
             description="Generate video using ByteDance models via api based on prompt",
-            short_description=None,
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -508,7 +505,6 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
             display_name="ByteDance Image to Video",
             category="api node/video/ByteDance",
             description="Generate video using ByteDance models via api based on image and prompt",
-            short_description="Generate video from image and prompt via ByteDance API.",
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -643,7 +639,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
             display_name="ByteDance First-Last-Frame to Video",
             category="api node/video/ByteDance",
             description="Generate video using prompt and first and last frames.",
-            short_description=None,
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -789,7 +784,6 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
             display_name="ByteDance Reference Images to Video",
             category="api node/video/ByteDance",
             description="Generate video using prompt and reference images.",
-            short_description=None,
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -254,7 +254,6 @@ class GeminiNode(IO.ComfyNode):
             description="Generate text responses with Google's Gemini AI model. "
             "You can provide multiple types of inputs (text, images, audio, video) "
             "as context for generating more relevant and meaningful responses.",
-            short_description="Generate text responses with Google's Gemini AI.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -481,7 +480,6 @@ class GeminiInputFiles(IO.ComfyNode):
             "The files will be read by the Gemini model when generating a response. "
             "The contents of the text file count toward the token limit. "
             "🛈 TIP: Can be chained together with other Gemini Input File nodes.",
-            short_description="Load and prepare input files for Gemini LLM nodes.",
             inputs=[
                 IO.Combo.Input(
                     "file",
@@ -536,7 +534,6 @@ class GeminiImage(IO.ComfyNode):
             display_name="Nano Banana (Google Gemini Image)",
             category="api node/image/Gemini",
             description="Edit images synchronously via Google API.",
-            short_description=None,
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -668,7 +665,6 @@ class GeminiImage2(IO.ComfyNode):
             display_name="Nano Banana Pro (Google Gemini Image)",
             category="api node/image/Gemini",
             description="Generate or edit images synchronously via Google Vertex API.",
-            short_description=None,
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -36,7 +36,6 @@ class GrokImageNode(IO.ComfyNode):
             display_name="Grok Image",
             category="api node/image/Grok",
             description="Generate images using Grok based on a text prompt",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                 IO.String.Input(
@@ -138,7 +137,6 @@ class GrokImageEditNode(IO.ComfyNode):
             display_name="Grok Image Edit",
             category="api node/image/Grok",
             description="Modify an existing image based on a text prompt",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                 IO.Image.Input("image"),
@@ -228,7 +226,6 @@ class GrokVideoNode(IO.ComfyNode):
             display_name="Grok Video",
             category="api node/video/Grok",
             description="Generate video from a prompt or an image",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                 IO.String.Input(
@@ -337,7 +334,6 @@ class GrokVideoEditNode(IO.ComfyNode):
             display_name="Grok Video Edit",
             category="api node/video/Grok",
             description="Edit an existing video based on a text prompt.",
-            short_description=None,
             inputs=[
                 IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                 IO.String.Input(
@@ -74,7 +74,6 @@ class HitPawGeneralImageEnhance(IO.ComfyNode):
             category="api node/image/HitPaw",
             description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
             f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
-            short_description="Upscale images to super-resolution, removing artifacts and noise.",
             inputs=[
                 IO.Combo.Input("model", options=["generative_portrait", "generative"]),
                 IO.Image.Input("image"),
@@ -206,7 +205,6 @@ class HitPawVideoEnhance(IO.ComfyNode):
             category="api node/video/HitPaw",
             description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
             "Prices shown are per second of video.",
-            short_description="Upscale videos to high resolution, removing artifacts and noise.",
             inputs=[
                 IO.DynamicCombo.Input("model", options=model_options),
                 IO.Video.Input("video"),
@@ -1,48 +1,31 @@
 from typing_extensions import override
 
-from comfy_api.latest import IO, ComfyExtension, Input, Types
+from comfy_api.latest import IO, ComfyExtension, Input
 from comfy_api_nodes.apis.hunyuan3d import (
     Hunyuan3DViewImage,
     InputGenerateType,
     ResultFile3D,
-    TextureEditTaskRequest,
     To3DProTaskCreateResponse,
     To3DProTaskQueryRequest,
     To3DProTaskRequest,
     To3DProTaskResultResponse,
-    To3DUVFileInput,
-    To3DUVTaskRequest,
 )
 from comfy_api_nodes.util import (
     ApiEndpoint,
     download_url_to_file_3d,
     download_url_to_image_tensor,
     downscale_image_tensor_by_max_side,
     poll_op,
     sync_op,
-    upload_3d_model_to_comfyapi,
     upload_image_to_comfyapi,
     validate_image_dimensions,
-    validate_string,
 )
 
 
-def _is_tencent_rate_limited(status: int, body: object) -> bool:
-    return (
-        status == 400
-        and isinstance(body, dict)
-        and "RequestLimitExceeded" in str(body.get("Response", {}).get("Error", {}).get("Code", ""))
-    )
-
-
-def get_file_from_response(
-    response_objs: list[ResultFile3D], file_type: str, raise_if_not_found: bool = True
-) -> ResultFile3D | None:
+def get_file_from_response(response_objs: list[ResultFile3D], file_type: str) -> ResultFile3D | None:
     for i in response_objs:
         if i.Type.lower() == file_type.lower():
             return i
-    if raise_if_not_found:
-        raise ValueError(f"'{file_type}' file type is not found in the response.")
     return None
 
 
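Reviewer note: the behavior difference between the two get_file_from_response signatures, as a sketch (ResultFile3D may require more fields than shown; the URL is a placeholder):

    files = [ResultFile3D(Type="GLB", Url="https://example.invalid/model.glb")]

    glb = get_file_from_response(files, "glb")  # the GLB entry (case-insensitive match)
    obj = get_file_from_response(files, "obj")  # new signature: returns None
    # old signature: get_file_from_response(files, "obj") raises ValueError
    # unless called with raise_if_not_found=False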
@@ -52,10 +35,8 @@ class TencentTextToModelNode(IO.ComfyNode):
     def define_schema(cls):
         return IO.Schema(
             node_id="TencentTextToModelNode",
-            display_name="Hunyuan3D: Text to Model",
+            display_name="Hunyuan3D: Text to Model (Pro)",
             category="api node/3d/Tencent",
             description="Generate 3D models from text prompts using Hunyuan3D Pro with configurable face count and geometry options.",
-            short_description="Generate 3D models from text using Hunyuan3D Pro.",
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -139,7 +120,6 @@ class TencentTextToModelNode(IO.ComfyNode):
                 EnablePBR=generate_type.get("pbr", None),
                 PolygonType=generate_type.get("polygon_type", None),
             ),
-            is_rate_limited=_is_tencent_rate_limited,
         )
         if response.Error:
             raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
@@ -151,14 +131,11 @@ class TencentTextToModelNode(IO.ComfyNode):
             response_model=To3DProTaskResultResponse,
             status_extractor=lambda r: r.Status,
         )
+        glb_result = get_file_from_response(result.ResultFile3Ds, "glb")
+        obj_result = get_file_from_response(result.ResultFile3Ds, "obj")
+        file_glb = await download_url_to_file_3d(glb_result.Url, "glb", task_id=task_id) if glb_result else None
         return IO.NodeOutput(
             f"{task_id}.glb",
-            await download_url_to_file_3d(
-                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
-            ),
-            await download_url_to_file_3d(
-                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
-            ),
+            file_glb, file_glb, await download_url_to_file_3d(obj_result.Url, "obj", task_id=task_id) if obj_result else None
         )
 
@@ -168,10 +145,8 @@ class TencentImageToModelNode(IO.ComfyNode):
     def define_schema(cls):
         return IO.Schema(
             node_id="TencentImageToModelNode",
-            display_name="Hunyuan3D: Image(s) to Model",
+            display_name="Hunyuan3D: Image(s) to Model (Pro)",
             category="api node/3d/Tencent",
             description="Generate 3D models from images using Hunyuan3D Pro with optional multi-view inputs and configurable geometry.",
-            short_description="Generate 3D models from images using Hunyuan3D Pro.",
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -293,7 +268,6 @@ class TencentImageToModelNode(IO.ComfyNode):
                 EnablePBR=generate_type.get("pbr", None),
                 PolygonType=generate_type.get("polygon_type", None),
             ),
-            is_rate_limited=_is_tencent_rate_limited,
         )
         if response.Error:
             raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
@@ -305,257 +279,11 @@ class TencentImageToModelNode(IO.ComfyNode):
             response_model=To3DProTaskResultResponse,
             status_extractor=lambda r: r.Status,
         )
+        glb_result = get_file_from_response(result.ResultFile3Ds, "glb")
+        obj_result = get_file_from_response(result.ResultFile3Ds, "obj")
+        file_glb = await download_url_to_file_3d(glb_result.Url, "glb", task_id=task_id) if glb_result else None
         return IO.NodeOutput(
             f"{task_id}.glb",
-            await download_url_to_file_3d(
-                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
-            ),
-            await download_url_to_file_3d(
-                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
-            ),
+            file_glb, file_glb, await download_url_to_file_3d(obj_result.Url, "obj", task_id=task_id) if obj_result else None
         )
-
-
-class TencentModelTo3DUVNode(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="TencentModelTo3DUVNode",
-            display_name="Hunyuan3D: Model to UV",
-            category="api node/3d/Tencent",
-            description="Perform UV unfolding on a 3D model to generate UV texture. "
-            "Input model must have less than 30000 faces.",
-            inputs=[
-                IO.MultiType.Input(
-                    "model_3d",
-                    types=[IO.File3DGLB, IO.File3DOBJ, IO.File3DFBX, IO.File3DAny],
-                    tooltip="Input 3D model (GLB, OBJ, or FBX)",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=1,
-                    min=0,
-                    max=2147483647,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed controls whether the node should re-run; "
-                    "results are non-deterministic regardless of seed.",
-                ),
-            ],
-            outputs=[
-                IO.File3DOBJ.Output(display_name="OBJ"),
-                IO.File3DFBX.Output(display_name="FBX"),
-                IO.Image.Output(),
-            ],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.2}'),
-        )
-
-    SUPPORTED_FORMATS = {"glb", "obj", "fbx"}
-
-    @classmethod
-    async def execute(
-        cls,
-        model_3d: Types.File3D,
-        seed: int,
-    ) -> IO.NodeOutput:
-        _ = seed
-        file_format = model_3d.format.lower()
-        if file_format not in cls.SUPPORTED_FORMATS:
-            raise ValueError(
-                f"Unsupported file format: '{file_format}'. "
-                f"Supported formats: {', '.join(sorted(cls.SUPPORTED_FORMATS))}."
-            )
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv", method="POST"),
-            response_model=To3DProTaskCreateResponse,
-            data=To3DUVTaskRequest(
-                File=To3DUVFileInput(
-                    Type=file_format.upper(),
-                    Url=await upload_3d_model_to_comfyapi(cls, model_3d, file_format),
-                )
-            ),
-            is_rate_limited=_is_tencent_rate_limited,
-        )
-        if response.Error:
-            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
-        result = await poll_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv/query", method="POST"),
-            data=To3DProTaskQueryRequest(JobId=response.JobId),
-            response_model=To3DProTaskResultResponse,
-            status_extractor=lambda r: r.Status,
-        )
-        return IO.NodeOutput(
-            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj"),
-            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
-            await download_url_to_image_tensor(get_file_from_response(result.ResultFile3Ds, "image").Url),
-        )
-
-
-class Tencent3DTextureEditNode(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="Tencent3DTextureEditNode",
-            display_name="Hunyuan3D: 3D Texture Edit",
-            category="api node/3d/Tencent",
-            description="After inputting the 3D model, perform 3D model texture redrawing.",
-            inputs=[
-                IO.MultiType.Input(
-                    "model_3d",
-                    types=[IO.File3DFBX, IO.File3DAny],
-                    tooltip="3D model in FBX format. Model should have less than 100000 faces.",
-                ),
-                IO.String.Input(
-                    "prompt",
-                    multiline=True,
-                    default="",
-                    tooltip="Describes texture editing. Supports up to 1024 UTF-8 characters.",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=2147483647,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed controls whether the node should re-run; "
-                    "results are non-deterministic regardless of seed.",
-                ),
-            ],
-            outputs=[
-                IO.File3DGLB.Output(display_name="GLB"),
-                IO.File3DFBX.Output(display_name="FBX"),
-            ],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            price_badge=IO.PriceBadge(
-                expr="""{"type":"usd","usd": 0.6}""",
-            ),
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        model_3d: Types.File3D,
-        prompt: str,
-        seed: int,
-    ) -> IO.NodeOutput:
-        _ = seed
-        file_format = model_3d.format.lower()
-        if file_format != "fbx":
-            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
-        validate_string(prompt, field_name="prompt", min_length=1, max_length=1024)
-        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit", method="POST"),
-            response_model=To3DProTaskCreateResponse,
-            data=TextureEditTaskRequest(
-                File3D=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
-                Prompt=prompt,
-                EnablePBR=True,
-            ),
-            is_rate_limited=_is_tencent_rate_limited,
-        )
-        if response.Error:
-            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
-
-        result = await poll_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit/query", method="POST"),
-            data=To3DProTaskQueryRequest(JobId=response.JobId),
-            response_model=To3DProTaskResultResponse,
-            status_extractor=lambda r: r.Status,
-        )
-        return IO.NodeOutput(
-            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb"),
-            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
-        )
-
-
-class Tencent3DPartNode(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="Tencent3DPartNode",
-            display_name="Hunyuan3D: 3D Part",
-            category="api node/3d/Tencent",
-            description="Automatically perform component identification and generation based on the model structure.",
-            inputs=[
-                IO.MultiType.Input(
-                    "model_3d",
-                    types=[IO.File3DFBX, IO.File3DAny],
-                    tooltip="3D model in FBX format. Model should have less than 30000 faces.",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=2147483647,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed controls whether the node should re-run; "
-                    "results are non-deterministic regardless of seed.",
-                ),
-            ],
-            outputs=[
-                IO.File3DFBX.Output(display_name="FBX"),
-            ],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.6}'),
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        model_3d: Types.File3D,
-        seed: int,
-    ) -> IO.NodeOutput:
-        _ = seed
-        file_format = model_3d.format.lower()
-        if file_format != "fbx":
-            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
-        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part", method="POST"),
-            response_model=To3DProTaskCreateResponse,
-            data=To3DUVTaskRequest(
-                File=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
-            ),
-            is_rate_limited=_is_tencent_rate_limited,
-        )
-        if response.Error:
-            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
-        result = await poll_op(
-            cls,
-            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part/query", method="POST"),
-            data=To3DProTaskQueryRequest(JobId=response.JobId),
-            response_model=To3DProTaskResultResponse,
-            status_extractor=lambda r: r.Status,
-        )
-        return IO.NodeOutput(
-            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
-        )
@@ -565,9 +293,6 @@ class TencentHunyuan3DExtension(ComfyExtension):
         return [
             TencentTextToModelNode,
             TencentImageToModelNode,
-            # TencentModelTo3DUVNode,
-            # Tencent3DTextureEditNode,
-            Tencent3DPartNode,
         ]
 
@@ -236,7 +236,6 @@ class IdeogramV1(IO.ComfyNode):
             display_name="Ideogram V1",
             category="api node/image/Ideogram",
             description="Generates images using the Ideogram V1 model.",
-            short_description=None,
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -362,7 +361,6 @@ class IdeogramV2(IO.ComfyNode):
             display_name="Ideogram V2",
             category="api node/image/Ideogram",
             description="Generates images using the Ideogram V2 model.",
-            short_description=None,
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -528,7 +526,6 @@ class IdeogramV3(IO.ComfyNode):
             category="api node/image/Ideogram",
             description="Generates images using the Ideogram V3 model. "
             "Supports both regular image generation from text prompts and image editing with mask.",
-            short_description="Generate and edit images with Ideogram V3.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -642,7 +642,6 @@ class KlingCameraControls(IO.ComfyNode):
|
||||
display_name="Kling Camera Controls",
|
||||
category="api node/video/Kling",
|
||||
description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
|
||||
short_description="Configure Kling camera controls and motion effects.",
|
||||
inputs=[
|
||||
IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
|
||||
IO.Float.Input(
|
||||
@@ -763,7 +762,6 @@ class KlingTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Text to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Text to Video Node",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -851,7 +849,6 @@ class OmniProTextToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Text to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use text prompts to generate videos with the latest Kling model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -992,7 +989,6 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni First-Last-Frame to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
|
||||
short_description="Generate video from start/end frames or reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1191,7 +1187,6 @@ class OmniProImageToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Image to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use up to 7 reference images to generate a video with the latest Kling model.",
|
||||
short_description="Generate video from up to 7 reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1352,7 +1347,6 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Video to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
|
||||
short_description="Generate video from a video and reference images.",
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1464,7 +1458,6 @@ class OmniProEditVideoNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Edit Video",
|
||||
category="api node/video/Kling",
|
||||
description="Edit an existing video with the latest model from Kling.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-video-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1572,7 +1565,6 @@ class OmniProImageNode(IO.ComfyNode):
|
||||
display_name="Kling 3.0 Omni Image",
|
||||
category="api node/image/Kling",
|
||||
description="Create or edit images with the latest model from Kling.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
IO.Combo.Input("model_name", options=["kling-v3-omni", "kling-image-o1"]),
|
||||
IO.String.Input(
|
||||
@@ -1701,7 +1693,6 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
|
||||
display_name="Kling Text to Video (Camera Control)",
|
||||
category="api node/video/Kling",
|
||||
description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
|
||||
short_description="Generate videos from text with camera movement controls.",
|
||||
inputs=[
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
|
||||
@@ -1763,8 +1754,6 @@ class KlingImage2VideoNode(IO.ComfyNode):
|
||||
node_id="KlingImage2VideoNode",
|
||||
display_name="Kling Image(First Frame) to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video from a first-frame image with configurable model, mode, aspect ratio, and duration settings.",
|
||||
short_description="Generate video from a first-frame reference image.",
|
||||
inputs=[
|
||||
IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
|
||||
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
|
||||
@@ -1865,7 +1854,6 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
|
||||
display_name="Kling Image to Video (Camera Control)",
|
||||
category="api node/video/Kling",
|
||||
description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
|
||||
short_description="Generate videos from images with camera movement controls.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -1937,7 +1925,6 @@ class KlingStartEndFrameNode(IO.ComfyNode):
|
||||
display_name="Kling Start-End Frame to Video",
|
||||
category="api node/video/Kling",
|
||||
description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
|
||||
short_description="Generate video transitioning between start and end frame images.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"start_frame",
|
||||
@@ -2032,7 +2019,6 @@ class KlingVideoExtendNode(IO.ComfyNode):
|
||||
display_name="Kling Video Extend",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
|
||||
short_description="Extend videos generated by other Kling nodes.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@@ -2114,7 +2100,6 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Dual Character Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
|
||||
short_description="Apply dual-character video effects from two images.",
|
||||
inputs=[
|
||||
IO.Image.Input("image_left", tooltip="Left side image"),
|
||||
IO.Image.Input("image_right", tooltip="Right side image"),
|
||||
@@ -2205,7 +2190,6 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
|
||||
display_name="Kling Video Effects",
|
||||
category="api node/video/Kling",
|
||||
description="Achieve different special effects when generating a video based on the effect_scene.",
|
||||
short_description="Apply special video effects to a single image.",
|
||||
inputs=[
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
@@ -2279,7 +2263,6 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
|
||||
display_name="Kling Lip Sync Video with Audio",
|
||||
category="api node/video/Kling",
|
||||
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
|
||||
short_description="Sync video mouth movements to audio content.",
|
||||
inputs=[
|
||||
IO.Video.Input("video"),
|
||||
IO.Audio.Input("audio"),
|
||||
@@ -2331,7 +2314,6 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
display_name="Kling Lip Sync Video with Text",
category="api node/video/Kling",
description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
short_description="Sync video mouth movements to a text prompt.",
inputs=[
IO.Video.Input("video"),
IO.String.Input(
@@ -2399,7 +2381,6 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
display_name="Kling Virtual Try On",
category="api node/image/Kling",
description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
short_description="Virtually try clothing onto a human image.",
inputs=[
IO.Image.Input("human_image"),
IO.Image.Input("cloth_image"),
@@ -2467,7 +2448,6 @@ class KlingImageGenerationNode(IO.ComfyNode):
display_name="Kling 3.0 Image",
category="api node/image/Kling",
description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
short_description="Generate images from text with optional reference image.",
inputs=[
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
@@ -2601,8 +2581,6 @@ class TextToVideoWithAudio(IO.ComfyNode):
node_id="KlingTextToVideoWithAudio",
display_name="Kling 2.6 Text to Video with Audio",
category="api node/video/Kling",
description="Generate a video with synchronized audio from a text prompt using the Kling v2-6 model.",
short_description="Generate video with audio from text using Kling v2-6.",
inputs=[
IO.Combo.Input("model_name", options=["kling-v2-6"]),
IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."),
@@ -2671,8 +2649,6 @@ class ImageToVideoWithAudio(IO.ComfyNode):
node_id="KlingImageToVideoWithAudio",
display_name="Kling 2.6 Image(First Frame) to Video with Audio",
category="api node/video/Kling",
description="Generate a video with synchronized audio from a first-frame image and text prompt using the Kling v2-6 model.",
short_description="Generate video with audio from an image using Kling v2-6.",
inputs=[
IO.Combo.Input("model_name", options=["kling-v2-6"]),
IO.Image.Input("start_frame"),
@@ -2743,8 +2719,6 @@ class MotionControl(IO.ComfyNode):
node_id="KlingMotionControl",
display_name="Kling Motion Control",
category="api node/video/Kling",
description="Drive character movement and expression in video using a reference image and motion reference video.",
short_description="Control video character motion using reference image and video.",
inputs=[
IO.String.Input("prompt", multiline=True),
IO.Image.Input("reference_image"),
@@ -2841,7 +2815,6 @@ class KlingVideoNode(IO.ComfyNode):
category="api node/video/Kling",
description="Generate videos with Kling V3. "
"Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.",
short_description="Generate videos with Kling V3 from text or images.",
inputs=[
IO.DynamicCombo.Input(
"multi_shot",
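Nearly every hunk in this compare touches the short_description field of a node's IO.Schema. For orientation, a sketch of one such schema assembled from the fragments above — the import path, node_id, and trailing inputs are assumptions, since each hunk shows only a few lines of context:

    from comfy_api.latest import io as IO  # assumed import; not shown in the diff

    class KlingVideoExtendNode(IO.ComfyNode):
        @classmethod
        def define_schema(cls):
            return IO.Schema(
                node_id="KlingVideoExtendNode",  # assumed; node_id is outside the hunk
                display_name="Kling Video Extend",
                category="api node/video/Kling",
                description="Kling Video Extend Node. Extend videos made by other "
                            "Kling nodes. The video_id is created by using other Kling Nodes.",
                short_description="Extend videos generated by other Kling nodes.",
                inputs=[
                    IO.String.Input("prompt", multiline=True),  # remaining inputs omitted
                ],
            )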
@@ -52,7 +52,6 @@ class TextToVideoNode(IO.ComfyNode):
display_name="LTXV Text To Video",
category="api node/video/LTXV",
description="Professional-quality videos with customizable duration and resolution.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
IO.String.Input(
@@ -129,7 +128,6 @@ class ImageToVideoNode(IO.ComfyNode):
display_name="LTXV Image To Video",
category="api node/video/LTXV",
description="Professional-quality videos with customizable duration and resolution based on start image.",
short_description=None,
inputs=[
IO.Image.Input("image", tooltip="First frame to be used for the video."),
IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
@@ -46,7 +46,6 @@ class LumaReferenceNode(IO.ComfyNode):
display_name="Luma Reference",
category="api node/image/Luma",
description="Holds an image and weight for use with Luma Generate Image node.",
short_description="Image and weight input for Luma generation.",
inputs=[
IO.Image.Input(
"image",
@@ -86,7 +85,6 @@ class LumaConceptsNode(IO.ComfyNode):
display_name="Luma Concepts",
category="api node/video/Luma",
description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.",
short_description="Camera concepts for Luma video generation nodes.",
inputs=[
IO.Combo.Input(
"concept1",
@@ -136,7 +134,6 @@ class LumaImageGenerationNode(IO.ComfyNode):
display_name="Luma Text to Image",
category="api node/image/Luma",
description="Generates images synchronously based on prompt and aspect ratio.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -281,7 +278,6 @@ class LumaImageModifyNode(IO.ComfyNode):
display_name="Luma Image to Image",
category="api node/image/Luma",
description="Modifies images synchronously based on prompt and aspect ratio.",
short_description=None,
inputs=[
IO.Image.Input(
"image",
@@ -375,7 +371,6 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode):
display_name="Luma Text to Video",
category="api node/video/Luma",
description="Generates videos synchronously based on prompt and output_size.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -477,7 +472,6 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode):
display_name="Luma Image to Video",
category="api node/video/Luma",
description="Generates videos synchronously based on prompt, input images, and output_size.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -242,7 +242,6 @@ class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
category="api node/image/Magnific",
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
"Maximum output: 10060×10060 pixels.",
short_description="High-fidelity upscaling with sharpness, grain, and detail control.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
@@ -402,7 +401,6 @@ class MagnificImageStyleTransferNode(IO.ComfyNode):
display_name="Magnific Image Style Transfer",
category="api node/image/Magnific",
description="Transfer the style from a reference image to your input image.",
short_description=None,
inputs=[
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
@@ -551,7 +549,6 @@ class MagnificImageRelightNode(IO.ComfyNode):
display_name="Magnific Image Relight",
category="api node/image/Magnific",
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
short_description=None,
inputs=[
IO.Image.Input("image", tooltip="The image to relight."),
IO.String.Input(
@@ -790,7 +787,6 @@ class MagnificImageSkinEnhancerNode(IO.ComfyNode):
display_name="Magnific Image Skin Enhancer",
category="api node/image/Magnific",
description="Skin enhancement for portraits with multiple processing modes.",
short_description=None,
inputs=[
IO.Image.Input("image", tooltip="The portrait image to enhance."),
IO.Int.Input(
@@ -34,8 +34,6 @@ class MeshyTextToModelNode(IO.ComfyNode):
node_id="MeshyTextToModelNode",
display_name="Meshy: Text to Model",
category="api node/3d/Meshy",
description="Generate a 3D model from a text prompt using the Meshy API.",
short_description="Generate a 3D model from a text prompt.",
inputs=[
IO.Combo.Input("model", options=["latest"]),
IO.String.Input("prompt", multiline=True, default=""),
@@ -148,7 +146,6 @@ class MeshyRefineNode(IO.ComfyNode):
display_name="Meshy: Refine Draft Model",
category="api node/3d/Meshy",
description="Refine a previously created draft model.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["latest"]),
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
@@ -242,8 +239,6 @@ class MeshyImageToModelNode(IO.ComfyNode):
node_id="MeshyImageToModelNode",
display_name="Meshy: Image to Model",
category="api node/3d/Meshy",
description="Generate a 3D model from a single image using the Meshy API.",
short_description="Generate a 3D model from an image.",
inputs=[
IO.Combo.Input("model", options=["latest"]),
IO.Image.Input("image"),
@@ -408,7 +403,6 @@ class MeshyMultiImageToModelNode(IO.ComfyNode):
node_id="MeshyMultiImageToModelNode",
display_name="Meshy: Multi-Image to Model",
category="api node/3d/Meshy",
description="Generate a 3D model from multiple images using the Meshy API.",
inputs=[
IO.Combo.Input("model", options=["latest"]),
IO.Autogrow.Input(
@@ -581,7 +575,6 @@ class MeshyRigModelNode(IO.ComfyNode):
description="Provides a rigged character in standard formats. "
"Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
"or humanoid assets with unclear limb and body structure.",
short_description="Rig a character model for animation.",
inputs=[
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
IO.Float.Input(
@@ -661,7 +654,6 @@ class MeshyAnimateModelNode(IO.ComfyNode):
display_name="Meshy: Animate Model",
category="api node/3d/Meshy",
description="Apply a specific animation action to a previously rigged character.",
short_description=None,
inputs=[
IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
IO.Int.Input(
@@ -727,7 +719,6 @@ class MeshyTextureNode(IO.ComfyNode):
node_id="MeshyTextureNode",
display_name="Meshy: Texture Model",
category="api node/3d/Meshy",
description="Apply textures to an existing 3D model using the Meshy API.",
inputs=[
IO.Combo.Input("model", options=["latest"]),
IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
@@ -103,7 +103,6 @@ class MinimaxTextToVideoNode(IO.ComfyNode):
display_name="MiniMax Text to Video",
category="api node/video/MiniMax",
description="Generates videos synchronously based on a prompt, and optional parameters.",
short_description=None,
inputs=[
IO.String.Input(
"prompt_text",
@@ -166,7 +165,6 @@ class MinimaxImageToVideoNode(IO.ComfyNode):
display_name="MiniMax Image to Video",
category="api node/video/MiniMax",
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
short_description="Generate videos from an image, prompt, and optional parameters.",
inputs=[
IO.Image.Input(
"image",
@@ -234,7 +232,6 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode):
display_name="MiniMax Subject to Video",
category="api node/video/MiniMax",
description="Generates videos synchronously based on an image and prompt, and optional parameters.",
short_description="Subject-driven video generation from image and prompt.",
inputs=[
IO.Image.Input(
"subject",
@@ -299,7 +296,6 @@ class MinimaxHailuoVideoNode(IO.ComfyNode):
display_name="MiniMax Hailuo Video",
category="api node/video/MiniMax",
description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.",
short_description="Generate videos with optional start frame using Hailuo-02.",
inputs=[
IO.String.Input(
"prompt_text",
@@ -166,7 +166,6 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
display_name="Moonvalley Marey Image to Video",
category="api node/video/Moonvalley Marey",
description="Moonvalley Marey Image to Video Node",
short_description=None,
inputs=[
IO.Image.Input(
"image",
@@ -291,8 +290,7 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
node_id="MoonvalleyVideo2VideoNode",
display_name="Moonvalley Marey Video to Video",
category="api node/video/Moonvalley Marey",
description="Transform an input video into a new video using a text prompt and motion or pose control.",
short_description="Transform video using text prompt with motion or pose control.",
description="",
inputs=[
IO.String.Input(
"prompt",
@@ -417,8 +415,7 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
node_id="MoonvalleyTxt2VideoNode",
display_name="Moonvalley Marey Text to Video",
category="api node/video/Moonvalley Marey",
description="Generate a video from a text prompt using the Moonvalley Marey model.",
short_description="Generate video from a text prompt using Moonvalley Marey.",
description="",
inputs=[
IO.String.Input(
"prompt",
@@ -43,6 +43,7 @@ class SupportedOpenAIModel(str, Enum):
o1 = "o1"
o3 = "o3"
o1_pro = "o1-pro"
gpt_4o = "gpt-4o"
gpt_4_1 = "gpt-4.1"
gpt_4_1_mini = "gpt-4.1-mini"
gpt_4_1_nano = "gpt-4.1-nano"
@@ -98,7 +99,6 @@ class OpenAIDalle2(IO.ComfyNode):
display_name="OpenAI DALL·E 2",
category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -249,7 +249,6 @@ class OpenAIDalle3(IO.ComfyNode):
display_name="OpenAI DALL·E 3",
category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -368,7 +367,6 @@ class OpenAIGPTImage1(IO.ComfyNode):
display_name="OpenAI GPT Image 1.5",
category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's GPT Image endpoint.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -579,7 +577,6 @@ class OpenAIChatNode(IO.ComfyNode):
display_name="OpenAI ChatGPT",
category="api node/text/OpenAI",
description="Generate text responses from an OpenAI model.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -652,6 +649,11 @@ class OpenAIChatNode(IO.ComfyNode):
"usd": [0.01, 0.04],
"format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
}
: $contains($m, "gpt-4o") ? {
"type": "list_usd",
"usd": [0.0025, 0.01],
"format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" }
}
: $contains($m, "gpt-4.1-nano") ? {
"type": "list_usd",
"usd": [0.0001, 0.0004],
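The added branch extends what appears to be a JSONata-style pricing expression mapping a model name to a [low, high] USD range per 1K tokens. A rough Python equivalent of the substring matching, assuming (unconfirmed by the hunk) that the pair is input/output pricing:

    # Hypothetical re-implementation of the $contains(...) chain shown above;
    # tiers are checked in order, as the original expression's branches are.
    PRICE_TIERS: list[tuple[str, list[float]]] = [
        ("gpt-4o", [0.0025, 0.01]),
        ("gpt-4.1-nano", [0.0001, 0.0004]),
    ]

    def lookup_price(model: str) -> list[float] | None:
        for needle, usd in PRICE_TIERS:
            if needle in model:  # mirrors $contains($m, needle)
                return usd
        return None              # no tier matched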
@@ -807,7 +809,6 @@ class OpenAIInputFiles(IO.ComfyNode):
display_name="OpenAI ChatGPT Input Files",
category="api node/text/OpenAI",
description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.",
short_description="Load and prepare input files for OpenAI Chat.",
inputs=[
IO.Combo.Input(
"file",
@@ -855,7 +856,6 @@ class OpenAIChatConfig(IO.ComfyNode):
display_name="OpenAI ChatGPT Advanced Options",
category="api node/text/OpenAI",
description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.",
short_description=None,
inputs=[
IO.Combo.Input(
"truncation",
@@ -54,8 +54,6 @@ class PixverseTemplateNode(IO.ComfyNode):
node_id="PixverseTemplateNode",
display_name="PixVerse Template",
category="api node/video/PixVerse",
description="Select a style template for PixVerse video generation.",
short_description=None,
inputs=[
IO.Combo.Input("template", options=list(pixverse_templates.keys())),
],
@@ -78,7 +76,6 @@ class PixverseTextToVideoNode(IO.ComfyNode):
display_name="PixVerse Text to Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -197,7 +194,6 @@ class PixverseImageToVideoNode(IO.ComfyNode):
display_name="PixVerse Image to Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input(
@@ -316,7 +312,6 @@ class PixverseTransitionVideoNode(IO.ComfyNode):
display_name="PixVerse Transition Video",
category="api node/video/PixVerse",
description="Generates videos based on prompt and output_size.",
short_description=None,
inputs=[
IO.Image.Input("first_frame"),
IO.Image.Input("last_frame"),
@@ -180,7 +180,6 @@ class RecraftColorRGBNode(IO.ComfyNode):
display_name="Recraft Color RGB",
category="api node/image/Recraft",
description="Create Recraft Color by choosing specific RGB values.",
short_description=None,
inputs=[
IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."),
IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."),
@@ -207,7 +206,6 @@ class RecraftControlsNode(IO.ComfyNode):
display_name="Recraft Controls",
category="api node/image/Recraft",
description="Create Recraft Controls for customizing Recraft generation.",
short_description=None,
inputs=[
IO.Custom(RecraftIO.COLOR).Input("colors", optional=True),
IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True),
@@ -232,7 +230,6 @@ class RecraftStyleV3RealisticImageNode(IO.ComfyNode):
display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft",
description="Select realistic_image style and optional substyle.",
short_description=None,
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -257,8 +254,7 @@ class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3DigitalIllustration",
display_name="Recraft Style - Digital Illustration",
category="api node/image/Recraft",
description="Select digital_illustration style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -275,10 +271,9 @@ class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode):
def define_schema(cls):
return IO.Schema(
node_id="RecraftStyleV3VectorIllustrationNode",
display_name="Recraft Style - Vector Illustration",
display_name="Recraft Style - Realistic Image",
category="api node/image/Recraft",
description="Select vector_illustration style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)),
],
@@ -297,8 +292,7 @@ class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode):
node_id="RecraftStyleV3LogoRaster",
display_name="Recraft Style - Logo Raster",
category="api node/image/Recraft",
description="Select logo_raster style and optional substyle.",
short_description=None,
description="Select realistic_image style and optional substyle.",
inputs=[
IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)),
],
@@ -316,7 +310,6 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
display_name="Recraft Style - Infinite Style Library",
category="api node/image/Recraft",
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
short_description=None,
inputs=[
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
],
@@ -342,7 +335,6 @@ class RecraftCreateStyleNode(IO.ComfyNode):
description="Create a custom style from reference images. "
"Upload 1-5 images to use as style references. "
"Total size of all images is limited to 5 MB.",
short_description="Create a custom style from 1-5 reference images.",
inputs=[
IO.Combo.Input(
"style",
@@ -410,7 +402,6 @@ class RecraftTextToImageNode(IO.ComfyNode):
display_name="Recraft Text to Image",
category="api node/image/Recraft",
description="Generates images synchronously based on prompt and resolution.",
short_description=None,
inputs=[
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
IO.Combo.Input(
@@ -523,7 +514,6 @@ class RecraftImageToImageNode(IO.ComfyNode):
display_name="Recraft Image to Image",
category="api node/image/Recraft",
description="Modify image based on prompt and strength.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."),
@@ -642,7 +632,6 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
display_name="Recraft Image Inpainting",
category="api node/image/Recraft",
description="Modify image based on prompt and mask.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.Mask.Input("mask"),
@@ -745,7 +734,6 @@ class RecraftTextToVectorNode(IO.ComfyNode):
display_name="Recraft Text to Vector",
category="api node/image/Recraft",
description="Generates SVG synchronously based on prompt and resolution.",
short_description=None,
inputs=[
IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True),
IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)),
@@ -846,7 +834,6 @@ class RecraftVectorizeImageNode(IO.ComfyNode):
display_name="Recraft Vectorize Image",
category="api node/image/Recraft",
description="Generates SVG synchronously from an input image.",
short_description=None,
inputs=[
IO.Image.Input("image"),
],
@@ -890,7 +877,6 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
display_name="Recraft Replace Background",
category="api node/image/Recraft",
description="Replace background on image, based on provided prompt.",
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True),
@@ -978,7 +964,6 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
display_name="Recraft Remove Background",
category="api node/image/Recraft",
description="Remove background from image, and return processed image and mask.",
short_description=None,
inputs=[
IO.Image.Input("image"),
],
@@ -1027,9 +1012,8 @@ class RecraftCrispUpscaleNode(IO.ComfyNode):
display_name="Recraft Crisp Upscale Image",
category="api node/image/Recraft",
description="Upscale image synchronously.\n"
"Enhances a given raster image using 'crisp upscale' tool, "
"Enhances a given raster image using ‘crisp upscale’ tool, "
"increasing image resolution, making the image sharper and cleaner.",
short_description="Crisp upscale to sharpen and increase image resolution.",
inputs=[
IO.Image.Input("image"),
],
@@ -1074,9 +1058,8 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
display_name="Recraft Creative Upscale Image",
category="api node/image/Recraft",
description="Upscale image synchronously.\n"
"Enhances a given raster image using 'creative upscale' tool, "
"Enhances a given raster image using ‘creative upscale’ tool, "
"boosting resolution with a focus on refining small details and faces.",
short_description="Creative upscale focusing on small details and faces.",
inputs=[
IO.Image.Input("image"),
],
@@ -238,7 +238,6 @@ class Rodin3D_Regular(IO.ComfyNode):
display_name="Rodin 3D Generate - Regular Generate",
category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("Images"),
*COMMON_PARAMETERS,
@@ -298,7 +297,6 @@ class Rodin3D_Detail(IO.ComfyNode):
display_name="Rodin 3D Generate - Detail Generate",
category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("Images"),
*COMMON_PARAMETERS,
@@ -358,7 +356,6 @@ class Rodin3D_Smooth(IO.ComfyNode):
display_name="Rodin 3D Generate - Smooth Generate",
category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("Images"),
*COMMON_PARAMETERS,
@@ -417,7 +414,6 @@ class Rodin3D_Sketch(IO.ComfyNode):
display_name="Rodin 3D Generate - Sketch Generate",
category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("Images"),
IO.Int.Input(
@@ -480,7 +476,6 @@ class Rodin3D_Gen2(IO.ComfyNode):
display_name="Rodin 3D Generate - Gen-2 Generate",
category="api node/3d/Rodin",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("Images"),
IO.Int.Input(
@@ -145,7 +145,6 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
"Before diving in, review these best practices to ensure that "
"your input selections will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
short_description="Generate video from a starting frame using Gen3a Turbo.",
inputs=[
IO.String.Input(
"prompt",
@@ -240,7 +239,6 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
"Before diving in, review these best practices to ensure that "
"your input selections will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
short_description="Generate video from a starting frame using Gen4 Turbo.",
inputs=[
IO.String.Input(
"prompt",
@@ -339,7 +337,6 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
"Before diving in, review these best practices to ensure that your input selections "
"will set your generation up for success: "
"https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
short_description="Generate video from first and last keyframes with a prompt.",
inputs=[
IO.String.Input(
"prompt",
@@ -446,7 +443,6 @@ class RunwayTextToImageNode(IO.ComfyNode):
category="api node/image/Runway",
description="Generate an image from a text prompt using Runway's Gen 4 model. "
"You can also include reference image to guide the generation.",
short_description="Generate an image from text using Runway Gen 4.",
inputs=[
IO.String.Input(
"prompt",
@@ -36,7 +36,6 @@ class OpenAIVideoSora2(IO.ComfyNode):
display_name="OpenAI Sora - Video",
category="api node/video/Sora",
description="OpenAI video and audio generation.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -64,7 +64,6 @@ class StabilityStableImageUltraNode(IO.ComfyNode):
display_name="Stability AI Stable Image Ultra",
category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -198,7 +197,6 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode):
display_name="Stability AI Stable Diffusion 3.5 Image",
category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -354,7 +352,6 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
display_name="Stability AI Upscale Conservative",
category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input(
@@ -457,7 +454,6 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
display_name="Stability AI Upscale Creative",
category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Image.Input("image"),
IO.String.Input(
@@ -577,7 +573,6 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
display_name="Stability AI Upscale Fast",
category="api node/image/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description="Quickly upscale an image to 4x its original size.",
inputs=[
IO.Image.Input("image"),
],
@@ -630,7 +625,6 @@ class StabilityTextToAudio(IO.ComfyNode):
display_name="Stability AI Text To Audio",
category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -707,7 +701,6 @@ class StabilityAudioToAudio(IO.ComfyNode):
display_name="Stability AI Audio To Audio",
category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -801,7 +794,6 @@ class StabilityAudioInpaint(IO.ComfyNode):
display_name="Stability AI Audio Inpaint",
category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""),
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -49,7 +49,6 @@ class TopazImageEnhance(IO.ComfyNode):
display_name="Topaz Image Enhance",
category="api node/image/Topaz",
description="Industry-standard upscaling and image enhancement.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["Reimagine"]),
IO.Image.Input("image"),
@@ -224,7 +223,6 @@ class TopazVideoEnhance(IO.ComfyNode):
display_name="Topaz Video Enhance",
category="api node/video/Topaz",
description="Breathe new life into video with powerful upscaling and recovery technology.",
short_description=None,
inputs=[
IO.Video.Input("video"),
IO.Boolean.Input("upscaler_enabled", default=True),
@@ -80,7 +80,6 @@ class TripoTextToModelNode(IO.ComfyNode):
node_id="TripoTextToModelNode",
display_name="Tripo: Text to Model",
category="api node/3d/Tripo",
description="Generate a 3D model from a text prompt using Tripo's API.",
inputs=[
IO.String.Input("prompt", multiline=True),
IO.String.Input("negative_prompt", multiline=True, optional=True),
@@ -200,7 +199,6 @@ class TripoImageToModelNode(IO.ComfyNode):
node_id="TripoImageToModelNode",
display_name="Tripo: Image to Model",
category="api node/3d/Tripo",
description="Generate a 3D model from a single image using Tripo's API.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input(
@@ -333,7 +331,6 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
node_id="TripoMultiviewToModelNode",
display_name="Tripo: Multiview to Model",
category="api node/3d/Tripo",
description="Generate a 3D model from multiple view images using Tripo's API.",
inputs=[
IO.Image.Input("image"),
IO.Image.Input("image_left", optional=True),
@@ -473,7 +470,6 @@ class TripoTextureNode(IO.ComfyNode):
node_id="TripoTextureNode",
display_name="Tripo: Texture model",
category="api node/3d/Tripo",
description="Apply textures to an existing 3D model using Tripo's API.",
inputs=[
IO.Custom("MODEL_TASK_ID").Input("model_task_id"),
IO.Boolean.Input("texture", default=True, optional=True),
@@ -542,7 +538,6 @@ class TripoRefineNode(IO.ComfyNode):
display_name="Tripo: Refine Draft model",
category="api node/3d/Tripo",
description="Refine a draft model created by v1.4 Tripo models only.",
short_description=None,
inputs=[
IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"),
],
@@ -582,8 +577,6 @@ class TripoRigNode(IO.ComfyNode):
node_id="TripoRigNode",
display_name="Tripo: Rig model",
category="api node/3d/Tripo",
description="Add a skeleton rig to an existing 3D model using Tripo's API.",
short_description="Add a skeleton rig to a 3D model.",
inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")],
outputs=[
IO.String.Output(display_name="model_file"), # for backward compatibility only
@@ -621,8 +614,6 @@ class TripoRetargetNode(IO.ComfyNode):
node_id="TripoRetargetNode",
display_name="Tripo: Retarget rigged model",
category="api node/3d/Tripo",
description="Apply a preset animation to a rigged 3D model using Tripo's API.",
short_description="Apply a preset animation to a rigged model.",
inputs=[
IO.Custom("RIG_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input(
@@ -688,8 +679,6 @@ class TripoConversionNode(IO.ComfyNode):
node_id="TripoConversionNode",
display_name="Tripo: Convert model",
category="api node/3d/Tripo",
description="Convert a 3D model to different formats with optional post-processing using Tripo's API.",
short_description="Convert a 3D model to different formats.",
inputs=[
IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"),
IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]),
@@ -46,7 +46,6 @@ class VeoVideoGenerationNode(IO.ComfyNode):
display_name="Google Veo 2 Video Generation",
category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 2 API",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -265,7 +264,6 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
display_name="Google Veo 3 Video Generation",
category="api node/video/Veo",
description="Generates videos from text prompts using Google's Veo 3 API",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -379,7 +377,6 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
display_name="Google Veo 3 First-Last-Frame to Video",
category="api node/video/Veo",
description="Generate video using prompt and first and last frames.",
short_description=None,
inputs=[
IO.String.Input(
"prompt",
@@ -72,7 +72,6 @@ class ViduTextToVideoNode(IO.ComfyNode):
display_name="Vidu Text To Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.String.Input(
@@ -169,7 +168,6 @@ class ViduImageToVideoNode(IO.ComfyNode):
display_name="Vidu Image To Video Generation",
category="api node/video/Vidu",
description="Generate video from image and optional prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -272,7 +270,6 @@ class ViduReferenceVideoNode(IO.ComfyNode):
display_name="Vidu Reference To Video Generation",
category="api node/video/Vidu",
description="Generate video from multiple images and a prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -386,7 +383,6 @@ class ViduStartEndToVideoNode(IO.ComfyNode):
display_name="Vidu Start End To Video Generation",
category="api node/video/Vidu",
description="Generate a video from start and end frames and a prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq1"], tooltip="Model name"),
IO.Image.Input(
@@ -489,7 +485,6 @@ class Vidu2TextToVideoNode(IO.ComfyNode):
display_name="Vidu2 Text-to-Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2"]),
IO.String.Input(
@@ -581,7 +576,6 @@ class Vidu2ImageToVideoNode(IO.ComfyNode):
display_name="Vidu2 Image-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input(
@@ -710,7 +704,6 @@ class Vidu2ReferenceVideoNode(IO.ComfyNode):
display_name="Vidu2 Reference-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from multiple reference images and a prompt.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2"]),
IO.Autogrow.Input(
@@ -844,7 +837,6 @@ class Vidu2StartEndToVideoNode(IO.ComfyNode):
display_name="Vidu2 Start/End Frame-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from a start frame, an end frame, and a prompt.",
short_description="Generate video from start frame, end frame, and prompt.",
inputs=[
IO.Combo.Input("model", options=["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]),
IO.Image.Input("first_frame"),
@@ -964,7 +956,6 @@ class ViduExtendVideoNode(IO.ComfyNode):
display_name="Vidu Video Extension",
category="api node/video/Vidu",
description="Extend an existing video by generating additional frames.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",
@@ -1135,7 +1126,6 @@ class ViduMultiFrameVideoNode(IO.ComfyNode):
display_name="Vidu Multi-Frame Video Generation",
category="api node/video/Vidu",
description="Generate a video with multiple keyframe transitions.",
short_description=None,
inputs=[
IO.Combo.Input("model", options=["viduq2-pro", "viduq2-turbo"]),
IO.Image.Input(
@@ -1282,7 +1272,6 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Text-to-Video Generation",
category="api node/video/Vidu",
description="Generate video from a text prompt.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",
@@ -1391,7 +1380,6 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
display_name="Vidu Q3 Image-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from an image and an optional prompt.",
short_description=None,
inputs=[
IO.DynamicCombo.Input(
"model",
@@ -175,7 +175,6 @@ class WanTextToImageApi(IO.ComfyNode):
display_name="Wan Text to Image",
category="api node/image/Wan",
description="Generates an image based on a text prompt.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -299,7 +298,6 @@ class WanImageToImageApi(IO.ComfyNode):
category="api node/image/Wan",
description="Generates an image from one or two input images and a text prompt. "
"The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).",
short_description="Generate an image from input images and a text prompt.",
inputs=[
IO.Combo.Input(
"model",
@@ -426,7 +424,6 @@ class WanTextToVideoApi(IO.ComfyNode):
display_name="Wan Text to Video",
category="api node/video/Wan",
description="Generates a video based on a text prompt.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -606,7 +603,6 @@ class WanImageToVideoApi(IO.ComfyNode):
display_name="Wan Image to Video",
category="api node/video/Wan",
description="Generates a video from the first frame and a text prompt.",
short_description=None,
inputs=[
IO.Combo.Input(
"model",
@@ -783,7 +779,6 @@ class WanReferenceVideoApi(IO.ComfyNode):
category="api node/video/Wan",
description="Use the character and voice from input videos, combined with a prompt, "
"to generate a new video that maintains character consistency.",
short_description="Generate character-consistent video from reference videos and prompt.",
inputs=[
IO.Combo.Input("model", options=["wan2.6-r2v"]),
IO.String.Input(
@@ -30,7 +30,6 @@ class WavespeedFlashVSRNode(IO.ComfyNode):
category="api node/video/WaveSpeed",
description="Fast, high-quality video upscaler that "
"boosts resolution and restores clarity for low-resolution or blurry footage.",
short_description="Fast video upscaler that boosts resolution and restores clarity.",
inputs=[
IO.Video.Input("video"),
IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
@@ -102,7 +101,6 @@ class WavespeedImageUpscaleNode(IO.ComfyNode):
display_name="WaveSpeed Image Upscale",
category="api node/image/WaveSpeed",
description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
short_description="Upscale images to 4K or 8K with enhanced quality.",
inputs=[
IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
IO.Image.Input("image"),
@@ -33,7 +33,6 @@ from .download_helpers import (
download_url_to_video_output,
)
from .upload_helpers import (
upload_3d_model_to_comfyapi,
upload_audio_to_comfyapi,
upload_file_to_comfyapi,
upload_image_to_comfyapi,
@@ -63,7 +62,6 @@ __all__ = [
"sync_op",
"sync_op_raw",
# Upload helpers
"upload_3d_model_to_comfyapi",
"upload_audio_to_comfyapi",
"upload_file_to_comfyapi",
"upload_image_to_comfyapi",
@@ -57,7 +57,7 @@ def tensor_to_bytesio(
image: torch.Tensor,
*,
total_pixels: int | None = 2048 * 2048,
mime_type: str | None = "image/png",
mime_type: str = "image/png",
) -> BytesIO:
"""Converts a torch.Tensor image to a named BytesIO object.
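The hunk above narrows tensor_to_bytesio's mime_type parameter from str | None to str, so passing None is no longer valid and callers must always supply a concrete MIME type. An illustrative call site — hypothetical, since the function's module path and the tensor layout expected by real callers are not shown in the hunk:

    import torch
    # from <module containing tensor_to_bytesio> import tensor_to_bytesio  # path not shown in the diff

    image = torch.rand(1, 64, 64, 3)  # dummy image tensor; real layout depends on the caller

    # Before this change, `tensor_to_bytesio(image, mime_type=None)` type-checked.
    # After it, always pass an explicit MIME string:
    buf = tensor_to_bytesio(image, mime_type="image/jpeg", total_pixels=2048 * 2048)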
@@ -164,27 +164,6 @@ async def upload_video_to_comfyapi(
return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type, wait_label)


_3D_MIME_TYPES = {
"glb": "model/gltf-binary",
"obj": "model/obj",
"fbx": "application/octet-stream",
}


async def upload_3d_model_to_comfyapi(
cls: type[IO.ComfyNode],
model_3d: Types.File3D,
file_format: str,
) -> str:
"""Uploads a 3D model file to ComfyUI API and returns its download URL."""
return await upload_file_to_comfyapi(
cls,
model_3d.get_data(),
f"{uuid.uuid4()}.{file_format}",
_3D_MIME_TYPES.get(file_format, "application/octet-stream"),
)


async def upload_file_to_comfyapi(
cls: type[IO.ComfyNode],
file_bytes_io: BytesIO,
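Since upload_3d_model_to_comfyapi and its _3D_MIME_TYPES table are deleted by this hunk, any remaining caller would need to inline the equivalent call to upload_file_to_comfyapi. A sketch reproducing the removed behaviour, built directly from the deleted code above (the wrapper name is hypothetical):

    import uuid

    # MIME table and call shape copied from the code removed in the hunk above.
    _3D_MIME_TYPES = {
        "glb": "model/gltf-binary",
        "obj": "model/obj",
        "fbx": "application/octet-stream",
    }

    async def upload_model_inline(cls, model_3d, file_format: str) -> str:
        # Same behaviour as the removed upload_3d_model_to_comfyapi().
        return await upload_file_to_comfyapi(
            cls,
            model_3d.get_data(),
            f"{uuid.uuid4()}.{file_format}",
            _3D_MIME_TYPES.get(file_format, "application/octet-stream"),
        )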
@@ -12,8 +12,6 @@ class TextEncodeAceStepAudio(io.ComfyNode):
return io.Schema(
node_id="TextEncodeAceStepAudio",
category="conditioning",
description="Encodes tags and lyrics into conditioning for ACE-Step 1.0 audio generation with adjustable lyrics strength.",
short_description="Encodes tags and lyrics for ACE-Step 1.0 audio.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -36,8 +34,6 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
return io.Schema(
node_id="TextEncodeAceStepAudio1.5",
category="conditioning",
description="Encodes tags, lyrics, and music parameters like BPM, key, and language into conditioning for ACE-Step 1.5 audio generation.",
short_description="Encodes text and music parameters for ACE-Step 1.5.",
inputs=[
io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True),
@@ -72,8 +68,6 @@ class EmptyAceStepLatentAudio(io.ComfyNode):
node_id="EmptyAceStepLatentAudio",
display_name="Empty Ace Step 1.0 Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.0 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.0 audio latent.",
inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
io.Int.Input(
@@ -97,8 +91,6 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
node_id="EmptyAceStep1.5LatentAudio",
display_name="Empty Ace Step 1.5 Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor for ACE-Step 1.5 with a specified duration and batch size.",
short_description="Creates an empty ACE-Step 1.5 audio latent.",
inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
io.Int.Input(
@@ -123,7 +115,6 @@ class ReferenceAudio(io.ComfyNode):
category="advanced/conditioning/audio",
is_experimental=True,
description="This node sets the reference audio for ace step 1.5",
short_description=None,
inputs=[
io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True),
@@ -46,8 +46,6 @@ class SamplerLCMUpscale(io.ComfyNode):
return io.Schema(
node_id="SamplerLCMUpscale",
category="sampling/custom_sampling/samplers",
description="Sampler that progressively upscales the latent during LCM sampling steps, combining denoising with gradual resolution increase.",
short_description="LCM sampler with progressive latent upscaling.",
inputs=[
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
@@ -95,8 +93,6 @@ class SamplerEulerCFGpp(io.ComfyNode):
node_id="SamplerEulerCFGpp",
display_name="SamplerEulerCFG++",
category="_for_testing", # "sampling/custom_sampling/samplers"
description="Euler sampler variant using the CFG++ formulation, which modifies the denoising direction using unconditional predictions for improved guidance.",
short_description="Euler sampler using CFG++ guidance formulation.",
inputs=[
io.Combo.Input("version", options=["regular", "alternative"]),
],
@@ -30,8 +30,6 @@ class AlignYourStepsScheduler(io.ComfyNode):
node_id="AlignYourStepsScheduler",
search_aliases=["AYS scheduler"],
category="sampling/custom_sampling/schedulers",
description="Generates an optimized noise schedule using the Align Your Steps method with log-linear interpolation.",
short_description="Optimized noise schedule using Align Your Steps.",
inputs=[
io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
io.Int.Input("steps", default=10, min=1, max=10000),
@@ -17,8 +17,6 @@ class APG(io.ComfyNode):
node_id="APG",
display_name="Adaptive Projected Guidance",
category="sampling/custom_sampling",
description="Applies Adaptive Projected Guidance to a model, decomposing CFG guidance into parallel and orthogonal components with optional momentum and norm thresholding for improved sampling quality.",
short_description="Decomposes CFG guidance with projection and normalization.",
inputs=[
io.Model.Input("model"),
io.Float.Input(
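The APG description summarizes the technique: the guidance update (cond − uncond) is split into the component parallel to the conditional prediction and the orthogonal remainder, which can then be weighted separately. A minimal torch sketch of that decomposition, independent of ComfyUI's actual implementation (the momentum and norm-thresholding options the node mentions are omitted):

    import torch

    def apg_decompose(cond: torch.Tensor, uncond: torch.Tensor,
                      ortho_scale: float = 1.0) -> torch.Tensor:
        # Split CFG guidance into components parallel/orthogonal to `cond`.
        guidance = cond - uncond
        v = cond.flatten()
        g = guidance.flatten()
        parallel = (g @ v) / (v @ v).clamp(min=1e-12) * v  # projection onto cond direction
        orthogonal = g - parallel
        return (parallel + ortho_scale * orthogonal).reshape(cond.shape)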
@@ -26,8 +26,6 @@ class UNetSelfAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetSelfAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet self-attention layers by specified multipliers to experiment with attention behavior.",
short_description="Scale UNet self-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -51,8 +49,6 @@ class UNetCrossAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetCrossAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output weights of UNet cross-attention layers by specified multipliers to experiment with text-to-image attention.",
short_description="Scale UNet cross-attention Q/K/V/Out weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -77,8 +73,6 @@ class CLIPAttentionMultiply(io.ComfyNode):
node_id="CLIPAttentionMultiply",
search_aliases=["clip attention scale", "text encoder attention"],
category="_for_testing/attention_experiments",
description="Scales the query, key, value, and output projection weights of CLIP text encoder self-attention layers by specified multipliers.",
short_description="Scale CLIP text encoder attention weights.",
inputs=[
io.Clip.Input("clip"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
@@ -113,8 +107,6 @@ class UNetTemporalAttentionMultiply(io.ComfyNode):
return io.Schema(
node_id="UNetTemporalAttentionMultiply",
category="_for_testing/attention_experiments",
description="Scales the output weights of UNet temporal and structural attention layers independently, allowing fine-grained control over video model attention behavior.",
short_description="Scale UNet temporal and structural attention weights.",
inputs=[
io.Model.Input("model"),
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
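All four *AttentionMultiply nodes share one mechanic: multiply selected attention projection weights by user-chosen factors. A rough sketch of that operation on a plain state dict — the key-name patterns are assumptions for illustration, not ComfyUI's exact internals, which vary by model family:

    import torch

    def scale_attention_weights(state_dict: dict[str, torch.Tensor],
                                q: float, k: float, v: float, out: float) -> dict[str, torch.Tensor]:
        # Substring -> multiplier mapping; real key names depend on the model.
        factors = {"to_q": q, "to_k": k, "to_v": v, "to_out": out}
        patched = {}
        for name, weight in state_dict.items():
            mult = next((f for pat, f in factors.items() if pat in name), 1.0)
            patched[name] = weight * mult
        return patched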
@@ -19,8 +19,6 @@ class EmptyLatentAudio(IO.ComfyNode):
node_id="EmptyLatentAudio",
display_name="Empty Latent Audio",
category="latent/audio",
description="Creates an empty latent audio tensor with a specified duration and batch size for Stable Audio generation.",
short_description="Creates an empty latent audio tensor.",
inputs=[
IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
IO.Int.Input(
@@ -45,8 +43,6 @@ class ConditioningStableAudio(IO.ComfyNode):
return IO.Schema(
node_id="ConditioningStableAudio",
category="conditioning",
description="Sets the start time and total duration on Stable Audio positive and negative conditioning.",
short_description="Sets timing parameters on Stable Audio conditioning.",
inputs=[
IO.Conditioning.Input("positive"),
IO.Conditioning.Input("negative"),
@@ -76,8 +72,6 @@ class VAEEncodeAudio(IO.ComfyNode):
search_aliases=["audio to latent"],
display_name="VAE Encode Audio",
category="latent/audio",
description="Encodes an audio waveform into a latent representation using a VAE, resampling if needed.",
short_description="Encodes audio into latent via VAE.",
inputs=[
IO.Audio.Input("audio"),
IO.Vae.Input("vae"),
@@ -121,8 +115,6 @@ class VAEDecodeAudio(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio",
category="latent/audio",
description="Decodes a latent representation back into an audio waveform using a VAE.",
short_description="Decodes latent into audio via VAE.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -145,8 +137,6 @@ class VAEDecodeAudioTiled(IO.ComfyNode):
search_aliases=["latent to audio"],
display_name="VAE Decode Audio (Tiled)",
category="latent/audio",
description="Decodes a latent representation into audio using tiled VAE decoding to reduce memory usage.",
short_description="Tiled VAE decoding of latent into audio.",
inputs=[
IO.Latent.Input("samples"),
IO.Vae.Input("vae"),
@@ -169,8 +159,6 @@ class SaveAudio(IO.ComfyNode):
search_aliases=["export flac"],
display_name="Save Audio (FLAC)",
category="audio",
description="Saves audio to disk in FLAC format with a configurable filename prefix.",
short_description="Saves audio to disk in FLAC format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -196,8 +184,6 @@ class SaveAudioMP3(IO.ComfyNode):
search_aliases=["export mp3"],
display_name="Save Audio (MP3)",
category="audio",
description="Saves audio to disk in MP3 format with configurable quality and filename prefix.",
short_description="Saves audio to disk in MP3 format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -226,8 +212,6 @@ class SaveAudioOpus(IO.ComfyNode):
search_aliases=["export opus"],
display_name="Save Audio (Opus)",
category="audio",
description="Saves audio to disk in Opus format with configurable quality and filename prefix.",
short_description="Saves audio to disk in Opus format.",
inputs=[
IO.Audio.Input("audio"),
IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -256,8 +240,6 @@ class PreviewAudio(IO.ComfyNode):
search_aliases=["play audio"],
display_name="Preview Audio",
category="audio",
description="Plays back audio in the UI for previewing.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
],
@@ -318,8 +300,6 @@ class LoadAudio(IO.ComfyNode):
search_aliases=["import audio", "open audio", "audio file"],
display_name="Load Audio",
category="audio",
description="Loads an audio or video file from disk and outputs the audio as a single Audio output.",
short_description="Loads an audio file from disk.",
inputs=[
IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
],
@@ -358,7 +338,6 @@ class RecordAudio(IO.ComfyNode):
search_aliases=["microphone input", "audio capture", "voice input"],
display_name="Record Audio",
category="audio",
description="Records audio from a microphone input and outputs the captured audio.",
inputs=[
IO.Custom("AUDIO_RECORD").Input("audio"),
],
@@ -384,7 +363,6 @@ class TrimAudioDuration(IO.ComfyNode):
search_aliases=["cut audio", "audio clip", "shorten audio"],
display_name="Trim Audio Duration",
description="Trim audio tensor into chosen time range.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -438,7 +416,6 @@ class SplitAudioChannels(IO.ComfyNode):
search_aliases=["stereo to mono"],
display_name="Split Audio Channels",
description="Separates the audio into left and right channels.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio"),
@@ -471,7 +448,6 @@ class JoinAudioChannels(IO.ComfyNode):
node_id="JoinAudioChannels",
display_name="Join Audio Channels",
description="Joins left and right mono audio channels into a stereo audio.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio_left"),
@@ -541,7 +517,6 @@ class AudioConcat(IO.ComfyNode):
search_aliases=["join audio", "combine audio", "append audio"],
display_name="Audio Concat",
description="Concatenates the audio1 to audio2 in the specified direction.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -590,7 +565,6 @@ class AudioMerge(IO.ComfyNode):
search_aliases=["mix audio", "overlay audio", "layer audio"],
display_name="Audio Merge",
description="Combine two audio tracks by overlaying their waveforms.",
short_description=None,
category="audio",
inputs=[
IO.Audio.Input("audio1"),
@@ -652,8 +626,6 @@ class AudioAdjustVolume(IO.ComfyNode):
search_aliases=["audio gain", "loudness", "audio level"],
display_name="Audio Adjust Volume",
category="audio",
description="Adjusts audio volume by a specified number of decibels.",
short_description=None,
inputs=[
IO.Audio.Input("audio"),
IO.Int.Input(
@@ -690,8 +662,6 @@ class EmptyAudio(IO.ComfyNode):
|
||||
search_aliases=["blank audio"],
|
||||
display_name="Empty Audio",
|
||||
category="audio",
|
||||
description="Creates a silent audio clip with configurable duration, sample rate, and channel count.",
|
||||
short_description="Creates a silent audio clip.",
|
||||
inputs=[
|
||||
IO.Float.Input(
|
||||
"duration",
|
||||
|
||||
@@ -11,8 +11,6 @@ class AudioEncoderLoader(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="AudioEncoderLoader",
|
||||
category="loaders",
|
||||
description="Loads an audio encoder model from a checkpoint file for encoding audio into embeddings.",
|
||||
short_description="Loads an audio encoder model from a checkpoint.",
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
"audio_encoder_name",
|
||||
@@ -38,8 +36,6 @@ class AudioEncoderEncode(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="AudioEncoderEncode",
|
||||
category="conditioning",
|
||||
description="Encodes audio into embeddings using a loaded audio encoder model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.AudioEncoder.Input("audio_encoder"),
|
||||
io.Audio.Input("audio"),
|
||||
|
||||
@@ -154,8 +154,6 @@ class WanCameraEmbedding(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="WanCameraEmbedding",
|
||||
category="camera",
|
||||
description="Generates Plucker camera embeddings from a selected camera motion trajectory for Wan video generation.",
|
||||
short_description="Generates camera embeddings for Wan video generation.",
|
||||
inputs=[
|
||||
io.Combo.Input(
|
||||
"camera_pose",
|
||||
|
||||
@@ -12,8 +12,6 @@ class Canny(io.ComfyNode):
|
||||
node_id="Canny",
|
||||
search_aliases=["edge detection", "outline", "contour detection", "line art"],
|
||||
category="image/preprocessors",
|
||||
description="Detects edges in an image using the Canny edge detection algorithm with configurable low and high thresholds.",
|
||||
short_description="Canny edge detection on images.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),
|
||||
|
||||
@@ -27,8 +27,6 @@ class CFGZeroStar(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CFGZeroStar",
|
||||
category="advanced/guidance",
|
||||
description="Applies CFG-Zero* post-CFG correction that computes an optimal scaling factor between conditional and unconditional predictions to reduce CFG artifacts.",
|
||||
short_description="CFG-Zero* guidance correction to reduce artifacts.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
],
|
||||
@@ -56,8 +54,6 @@ class CFGNorm(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CFGNorm",
|
||||
category="advanced/guidance",
|
||||
description="Constrains the CFG-guided prediction norm to not exceed the conditional prediction norm, helping to prevent oversaturation at high CFG scales.",
|
||||
short_description="Constrain CFG output norm to conditional prediction norm.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),
|
||||
|
||||
@@ -14,8 +14,6 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="EmptyChromaRadianceLatentImage",
|
||||
category="latent/chroma_radiance",
|
||||
description="Creates an empty Chroma Radiance latent image tensor with the specified width, height, and batch size.",
|
||||
short_description="Creates an empty Chroma Radiance latent image.",
|
||||
inputs=[
|
||||
io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
@@ -37,7 +35,6 @@ class ChromaRadianceOptions(io.ComfyNode):
|
||||
node_id="ChromaRadianceOptions",
|
||||
category="model_patches/chroma_radiance",
|
||||
description="Allows setting advanced options for the Chroma Radiance model.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Model.Input(id="model"),
|
||||
io.Boolean.Input(
|
||||
|
||||
@@ -10,8 +10,6 @@ class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeSDXLRefiner",
|
||||
category="advanced/conditioning",
|
||||
description="Encodes text for SDXL refiner models with aesthetic score and resolution conditioning parameters.",
|
||||
short_description="Encodes text for SDXL refiner models.",
|
||||
inputs=[
|
||||
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
|
||||
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
|
||||
@@ -33,8 +31,6 @@ class CLIPTextEncodeSDXL(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeSDXL",
|
||||
category="advanced/conditioning",
|
||||
description="Encodes separate G and L text prompts for SDXL models with resolution and crop conditioning parameters.",
|
||||
short_description="Encodes dual text prompts for SDXL models.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
|
||||
|
||||
@@ -10,7 +10,6 @@ class ColorToRGBInt(io.ComfyNode):
|
||||
display_name="Color to RGB Int",
|
||||
category="utils",
|
||||
description="Convert a color to a RGB integer value.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Color.Input("color"),
|
||||
],
|
||||
|
||||
@@ -112,8 +112,6 @@ class PorterDuffImageComposite(io.ComfyNode):
|
||||
search_aliases=["alpha composite", "blend modes", "layer blend", "transparency blend"],
|
||||
display_name="Porter-Duff Image Composite",
|
||||
category="mask/compositing",
|
||||
description="Composites two images with alpha masks using Porter-Duff blend modes.",
|
||||
short_description="",
|
||||
inputs=[
|
||||
io.Image.Input("source"),
|
||||
io.Mask.Input("source_alpha"),
|
||||
@@ -171,8 +169,6 @@ class SplitImageWithAlpha(io.ComfyNode):
|
||||
search_aliases=["extract alpha", "separate transparency", "remove alpha"],
|
||||
display_name="Split Image with Alpha",
|
||||
category="mask/compositing",
|
||||
description="Separates an RGBA image into its RGB color channels and an alpha transparency mask.",
|
||||
short_description="Split RGBA image into RGB and alpha mask.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
],
|
||||
@@ -197,8 +193,6 @@ class JoinImageWithAlpha(io.ComfyNode):
|
||||
search_aliases=["add transparency", "apply alpha", "composite alpha", "RGBA"],
|
||||
display_name="Join Image with Alpha",
|
||||
category="mask/compositing",
|
||||
description="Combines an RGB image with an alpha mask to produce an RGBA image with transparency.",
|
||||
short_description="Combine RGB image and alpha into RGBA.",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
io.Mask.Input("alpha"),
|
||||
|
||||
@@ -9,8 +9,6 @@ class CLIPTextEncodeControlnet(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="CLIPTextEncodeControlnet",
|
||||
category="_for_testing/conditioning",
|
||||
description="Encodes text with CLIP and attaches the result as cross-attention controlnet conditioning to existing conditioning data.",
|
||||
short_description="CLIP text encode for controlnet cross-attention conditioning.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Conditioning.Input("conditioning"),
|
||||
@@ -38,8 +36,6 @@ class T5TokenizerOptions(io.ComfyNode):
|
||||
return io.Schema(
|
||||
node_id="T5TokenizerOptions",
|
||||
category="_for_testing/conditioning",
|
||||
description="Configures minimum padding and length options for T5-family tokenizers used in CLIP text encoding.",
|
||||
short_description="Set T5 tokenizer padding and length options.",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),
|
||||
|
||||
@@ -12,7 +12,6 @@ class ContextWindowsManualNode(io.ComfyNode):
|
||||
display_name="Context Windows (Manual)",
|
||||
category="context",
|
||||
description="Manually set context windows.",
|
||||
short_description=None,
|
||||
inputs=[
|
||||
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
|
||||
io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."),
|
||||
@@ -66,7 +65,6 @@ class WanContextWindowsManualNode(ContextWindowsManualNode):
|
||||
schema.node_id = "WanContextWindowsManual"
|
||||
schema.display_name = "WAN Context Windows (Manual)"
|
||||
schema.description = "Manually set context windows for WAN-like models (dim=2)."
|
||||
schema.short_description = None
|
||||
schema.inputs = [
|
||||
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),
|
||||
io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."),
|
||||
|
||||
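
Besides the added descriptions, the WanContextWindowsManual hunk above shows how a subclass reuses its parent's schema and overrides only a few fields. A minimal sketch of that pattern (hypothetical node; assumes the parent's define_schema() classmethod returns a mutable io.Schema, as the assignments above suggest):

class MyContextWindowsNode(ContextWindowsManualNode):
    @classmethod
    def define_schema(cls):
        # Start from the parent's schema and override selected fields.
        schema = super().define_schema()
        schema.node_id = "MyContextWindowsManual"  # hypothetical node id
        schema.display_name = "My Context Windows (Manual)"
        schema.description = "Manually set context windows for a custom model family."
        return schema
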
@@ -10,8 +10,6 @@ class SetUnionControlNetType(io.ComfyNode):
        return io.Schema(
            node_id="SetUnionControlNetType",
            category="conditioning/controlnet",
            description="Sets the control type for a Union ControlNet, selecting which conditioning mode to use.",
            short_description="Select control mode for Union ControlNet.",
            inputs=[
                io.ControlNet.Input("control_net"),
                io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),

@@ -42,8 +40,6 @@ class ControlNetInpaintingAliMamaApply(io.ComfyNode):
            node_id="ControlNetInpaintingAliMamaApply",
            search_aliases=["masked controlnet"],
            category="conditioning/controlnet",
            description="Applies an AliMama inpainting ControlNet to positive and negative conditioning using an image and mask with VAE encoding.",
            short_description="Applies AliMama inpainting ControlNet with mask.",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),

@@ -14,7 +14,6 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
        return io.Schema(
            node_id="EmptyCosmosLatentVideo",
            category="latent/video",
            description="Creates an empty latent tensor sized for Cosmos video generation.",
            inputs=[
                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -47,8 +46,6 @@ class CosmosImageToVideoLatent(io.ComfyNode):
        return io.Schema(
            node_id="CosmosImageToVideoLatent",
            category="conditioning/inpaint",
            description="Creates an inpainting video latent for Cosmos by encoding optional start and end images with a noise mask.",
            short_description="Cosmos inpainting video latent from start/end images.",
            inputs=[
                io.Vae.Input("vae"),
                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -92,8 +89,6 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
        return io.Schema(
            node_id="CosmosPredict2ImageToVideoLatent",
            category="conditioning/inpaint",
            description="Creates an inpainting video latent for Cosmos Predict2 by encoding optional start and end images with Wan latent format processing.",
            short_description="Cosmos Predict2 inpainting video latent from images.",
            inputs=[
                io.Vae.Input("vae"),
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -18,8 +18,6 @@ class BasicScheduler(io.ComfyNode):
        return io.Schema(
            node_id="BasicScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule from a model using a selected scheduler algorithm, step count, and denoise strength.",
            short_description="Generate sigma schedule from model and scheduler.",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES),

@@ -50,8 +48,6 @@ class KarrasScheduler(io.ComfyNode):
        return io.Schema(
            node_id="KarrasScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using the Karras noise schedule with configurable sigma range and rho parameter.",
            short_description="Generate sigmas using Karras noise schedule.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),

@@ -74,8 +70,6 @@ class ExponentialScheduler(io.ComfyNode):
        return io.Schema(
            node_id="ExponentialScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using an exponential noise schedule with configurable sigma range.",
            short_description="Generate sigmas using exponential noise schedule.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),

@@ -97,8 +91,6 @@ class PolyexponentialScheduler(io.ComfyNode):
        return io.Schema(
            node_id="PolyexponentialScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using a polyexponential noise schedule with configurable sigma range and rho parameter.",
            short_description="Generate sigmas using polyexponential noise schedule.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),

@@ -121,8 +113,6 @@ class LaplaceScheduler(io.ComfyNode):
        return io.Schema(
            node_id="LaplaceScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using a Laplace distribution-based noise schedule with configurable mu and beta parameters.",
            short_description="Generate sigmas using Laplace distribution schedule.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False),

@@ -147,8 +137,6 @@ class SDTurboScheduler(io.ComfyNode):
        return io.Schema(
            node_id="SDTurboScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule optimized for SD Turbo models with very few steps and adjustable denoise strength.",
            short_description="Generate sigma schedule for SD Turbo models.",
            inputs=[
                io.Model.Input("model"),
                io.Int.Input("steps", default=1, min=1, max=10),

@@ -173,8 +161,6 @@ class BetaSamplingScheduler(io.ComfyNode):
        return io.Schema(
            node_id="BetaSamplingScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using a beta distribution with configurable alpha and beta shape parameters.",
            short_description="Generate sigmas using beta distribution schedule.",
            inputs=[
                io.Model.Input("model"),
                io.Int.Input("steps", default=20, min=1, max=10000),

@@ -197,8 +183,6 @@ class VPScheduler(io.ComfyNode):
        return io.Schema(
            node_id="VPScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule using the Variance Preserving (VP) SDE formulation with configurable beta and epsilon parameters.",
            short_description="Generate sigmas using VP SDE schedule.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False),  # TODO: fix default values

@@ -221,8 +205,6 @@ class SplitSigmas(io.ComfyNode):
        return io.Schema(
            node_id="SplitSigmas",
            category="sampling/custom_sampling/sigmas",
            description="Splits a sigma sequence into high and low portions at a specified step index for multi-pass sampling.",
            short_description="Split sigmas into high and low at a step.",
            inputs=[
                io.Sigmas.Input("sigmas"),
                io.Int.Input("step", default=0, min=0, max=10000),

@@ -247,8 +229,6 @@ class SplitSigmasDenoise(io.ComfyNode):
        return io.Schema(
            node_id="SplitSigmasDenoise",
            category="sampling/custom_sampling/sigmas",
            description="Splits a sigma sequence into high and low portions based on a denoise ratio for multi-pass sampling workflows.",
            short_description="Split sigmas by denoise ratio.",
            inputs=[
                io.Sigmas.Input("sigmas"),
                io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),

@@ -275,8 +255,6 @@ class FlipSigmas(io.ComfyNode):
        return io.Schema(
            node_id="FlipSigmas",
            category="sampling/custom_sampling/sigmas",
            description="Reverses the order of a sigma sequence, useful for converting between ascending and descending noise schedules.",
            short_description="Reverse the order of a sigma sequence.",
            inputs=[io.Sigmas.Input("sigmas")],
            outputs=[io.Sigmas.Output()]
        )

@@ -299,8 +277,6 @@ class SetFirstSigma(io.ComfyNode):
        return io.Schema(
            node_id="SetFirstSigma",
            category="sampling/custom_sampling/sigmas",
            description="Overrides the first sigma value in a sequence with a custom value, allowing manual control of the initial noise level.",
            short_description="Override the first sigma value in a sequence.",
            inputs=[
                io.Sigmas.Input("sigmas"),
                io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False),

@@ -323,8 +299,6 @@ class ExtendIntermediateSigmas(io.ComfyNode):
            node_id="ExtendIntermediateSigmas",
            search_aliases=["interpolate sigmas"],
            category="sampling/custom_sampling/sigmas",
            description="Interpolates additional intermediate sigma values between existing steps using selectable spacing within a specified sigma range.",
            short_description="Interpolate additional sigma steps between existing values.",
            inputs=[
                io.Sigmas.Input("sigmas"),
                io.Int.Input("steps", default=2, min=1, max=100),

@@ -378,8 +352,6 @@ class SamplingPercentToSigma(io.ComfyNode):
        return io.Schema(
            node_id="SamplingPercentToSigma",
            category="sampling/custom_sampling/sigmas",
            description="Converts a sampling percentage (0.0 to 1.0) to the corresponding sigma value using a model's noise schedule.",
            short_description="Convert sampling percentage to sigma value.",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001),

@@ -408,8 +380,6 @@ class KSamplerSelect(io.ComfyNode):
        return io.Schema(
            node_id="KSamplerSelect",
            category="sampling/custom_sampling/samplers",
            description="Selects a sampler algorithm by name from the list of available samplers and outputs the sampler object.",
            short_description="Select a sampler algorithm by name.",
            inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)],
            outputs=[io.Sampler.Output()]
        )

@@ -427,8 +397,6 @@ class SamplerDPMPP_3M_SDE(io.ComfyNode):
        return io.Schema(
            node_id="SamplerDPMPP_3M_SDE",
            category="sampling/custom_sampling/samplers",
            description="Creates a DPM++ 3M SDE sampler with configurable eta, noise scale, and GPU or CPU noise generation.",
            short_description="Create a DPM++ 3M SDE sampler.",
            inputs=[
                io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
                io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),

@@ -454,8 +422,6 @@ class SamplerDPMPP_2M_SDE(io.ComfyNode):
        return io.Schema(
            node_id="SamplerDPMPP_2M_SDE",
            category="sampling/custom_sampling/samplers",
            description="Creates a DPM++ 2M SDE sampler with configurable solver type, eta, noise scale, and noise device.",
            short_description="Create a DPM++ 2M SDE sampler.",
            inputs=[
                io.Combo.Input("solver_type", options=['midpoint', 'heun']),
                io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),

@@ -483,8 +449,6 @@ class SamplerDPMPP_SDE(io.ComfyNode):
        return io.Schema(
            node_id="SamplerDPMPP_SDE",
            category="sampling/custom_sampling/samplers",
            description="Creates a DPM++ SDE sampler with configurable eta, noise scale, r parameter, and noise device.",
            short_description="Create a DPM++ SDE sampler.",
            inputs=[
                io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
                io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),

@@ -511,8 +475,6 @@ class SamplerDPMPP_2S_Ancestral(io.ComfyNode):
        return io.Schema(
            node_id="SamplerDPMPP_2S_Ancestral",
            category="sampling/custom_sampling/samplers",
            description="Creates a DPM++ 2S Ancestral sampler with configurable eta and noise scale parameters.",
            short_description="Create a DPM++ 2S Ancestral sampler.",
            inputs=[
                io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
                io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),

@@ -533,8 +495,6 @@ class SamplerEulerAncestral(io.ComfyNode):
        return io.Schema(
            node_id="SamplerEulerAncestral",
            category="sampling/custom_sampling/samplers",
            description="Creates an Euler Ancestral sampler with configurable eta and noise scale for stochastic sampling.",
            short_description="Create an Euler Ancestral stochastic sampler.",
            inputs=[
                io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False),
                io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False),

@@ -556,8 +516,6 @@ class SamplerEulerAncestralCFGPP(io.ComfyNode):
            node_id="SamplerEulerAncestralCFGPP",
            display_name="SamplerEulerAncestralCFG++",
            category="sampling/custom_sampling/samplers",
            description="Creates an Euler Ancestral CFG++ sampler that applies classifier-free guidance with improved stability.",
            short_description="Create an Euler Ancestral CFG++ sampler.",
            inputs=[
                io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False),
                io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False),

@@ -580,8 +538,6 @@ class SamplerLMS(io.ComfyNode):
        return io.Schema(
            node_id="SamplerLMS",
            category="sampling/custom_sampling/samplers",
            description="Creates a Linear Multi-Step (LMS) sampler with a configurable order parameter.",
            short_description="Create a Linear Multi-Step (LMS) sampler.",
            inputs=[io.Int.Input("order", default=4, min=1, max=100)],
            outputs=[io.Sampler.Output()]
        )

@@ -599,8 +555,6 @@ class SamplerDPMAdaptative(io.ComfyNode):
        return io.Schema(
            node_id="SamplerDPMAdaptative",
            category="sampling/custom_sampling/samplers",
            description="Creates a DPM Adaptive sampler with configurable order, tolerances, PID coefficients, and stochastic noise parameters for adaptive step-size sampling.",
            short_description="Create a DPM Adaptive step-size sampler.",
            inputs=[
                io.Int.Input("order", default=3, min=2, max=3),
                io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False),

@@ -632,8 +586,6 @@ class SamplerER_SDE(io.ComfyNode):
        return io.Schema(
            node_id="SamplerER_SDE",
            category="sampling/custom_sampling/samplers",
            description="Creates an ER-SDE sampler supporting ER-SDE, reverse-time SDE, and ODE solver types with configurable stochastic strength and staging.",
            short_description="Create an ER-SDE, reverse-time SDE, or ODE sampler.",
            inputs=[
                io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]),
                io.Int.Input("max_stage", default=3, min=1, max=3),

@@ -672,8 +624,6 @@ class SamplerSASolver(io.ComfyNode):
            node_id="SamplerSASolver",
            search_aliases=["sde"],
            category="sampling/custom_sampling/samplers",
            description="Creates an SA-Solver sampler with configurable predictor/corrector orders, SDE region, and PECE mode for high-order diffusion sampling.",
            short_description="Create an SA-Solver high-order diffusion sampler.",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False),

@@ -734,8 +684,7 @@ class SamplerSEEDS2(io.ComfyNode):
                "- solver_type=phi_2, r=1.0, eta=0.0\n\n"
                "exp_heun_2_x0_sde\n"
                "- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0"
            ),
            short_description="SEEDS2 sampler with configurable solver and SDE settings.",
        )
    )

    @classmethod

@@ -779,8 +728,6 @@ class SamplerCustom(io.ComfyNode):
        return io.Schema(
            node_id="SamplerCustom",
            category="sampling/custom_sampling",
            description="Runs a complete custom sampling pass by combining a model, sampler, sigmas, and conditioning with optional noise injection.",
            short_description="Run custom sampling with manual sampler and sigmas.",
            inputs=[
                io.Model.Input("model"),
                io.Boolean.Input("add_noise", default=True),

@@ -847,8 +794,6 @@ class BasicGuider(io.ComfyNode):
        return io.Schema(
            node_id="BasicGuider",
            category="sampling/custom_sampling/guiders",
            description="Creates a basic guider that applies a single conditioning input to guide the diffusion model without classifier-free guidance.",
            short_description="Create a single-conditioning guider without CFG.",
            inputs=[
                io.Model.Input("model"),
                io.Conditioning.Input("conditioning"),

@@ -870,8 +815,6 @@ class CFGGuider(io.ComfyNode):
        return io.Schema(
            node_id="CFGGuider",
            category="sampling/custom_sampling/guiders",
            description="Creates a classifier-free guidance guider that combines positive and negative conditioning with an adjustable CFG scale.",
            short_description="Create a CFG guider with positive/negative conditioning.",
            inputs=[
                io.Model.Input("model"),
                io.Conditioning.Input("positive"),

@@ -926,8 +869,6 @@ class DualCFGGuider(io.ComfyNode):
            node_id="DualCFGGuider",
            search_aliases=["dual prompt guidance"],
            category="sampling/custom_sampling/guiders",
            description="Creates a dual classifier-free guidance guider that blends two conditioning inputs against a negative with independent CFG scales and regular or nested styles.",
            short_description="Create a dual CFG guider with two conditionings.",
            inputs=[
                io.Model.Input("model"),
                io.Conditioning.Input("cond1"),

@@ -956,8 +897,6 @@ class DisableNoise(io.ComfyNode):
            node_id="DisableNoise",
            search_aliases=["zero noise"],
            category="sampling/custom_sampling/noise",
            description="Produces a zero-noise source that disables noise injection, useful for deterministic sampling or img2img without added noise.",
            short_description="Produce zero noise to disable noise injection.",
            inputs=[],
            outputs=[io.Noise.Output()]
        )

@@ -975,8 +914,6 @@ class RandomNoise(io.ComfyNode):
        return io.Schema(
            node_id="RandomNoise",
            category="sampling/custom_sampling/noise",
            description="Produces a random noise source from a seed value for use in custom sampling workflows.",
            short_description="Produce seeded random noise for sampling.",
            inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)],
            outputs=[io.Noise.Output()]
        )

@@ -994,8 +931,6 @@ class SamplerCustomAdvanced(io.ComfyNode):
        return io.Schema(
            node_id="SamplerCustomAdvanced",
            category="sampling/custom_sampling",
            description="Runs an advanced custom sampling pass using separate noise, guider, sampler, and sigmas inputs for maximum control over the diffusion process.",
            short_description="Run advanced custom sampling with separate components.",
            inputs=[
                io.Noise.Input("noise"),
                io.Guider.Input("guider"),

@@ -1050,8 +985,6 @@ class AddNoise(io.ComfyNode):
        return io.Schema(
            node_id="AddNoise",
            category="_for_testing/custom_sampling/noise",
            description="Adds scaled noise to a latent image using the model's noise schedule and sigma values for manual noise injection.",
            short_description="Add scaled noise to a latent image.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),

@@ -1102,8 +1035,6 @@ class ManualSigmas(io.ComfyNode):
            node_id="ManualSigmas",
            search_aliases=["custom noise schedule", "define sigmas"],
            category="_for_testing/custom_sampling",
            description="Defines a custom sigma sequence by manually entering comma-separated numeric values as a text string.",
            short_description="Define custom sigmas from comma-separated values.",
            is_experimental=True,
            inputs=[
                io.String.Input("sigmas", default="1, 0.5", multiline=False)

@@ -49,8 +49,6 @@ class LoadImageDataSetFromFolderNode(io.ComfyNode):
            node_id="LoadImageDataSetFromFolder",
            display_name="Load Image Dataset from Folder",
            category="dataset",
            description="Loads all images from a selected input subfolder and outputs them as a list of image tensors.",
            short_description="Loads images from a folder as a list.",
            is_experimental=True,
            inputs=[
                io.Combo.Input(

@@ -88,8 +86,6 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
            node_id="LoadImageTextDataSetFromFolder",
            display_name="Load Image and Text Dataset from Folder",
            category="dataset",
            description="Loads paired images and text captions from a folder, matching each image with its corresponding text file.",
            short_description="Loads paired images and text captions from folder.",
            is_experimental=True,
            inputs=[
                io.Combo.Input(

@@ -212,8 +208,6 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
            node_id="SaveImageDataSetToFolder",
            display_name="Save Image Dataset to Folder",
            category="dataset",
            description="Saves a list of images to a named folder in the output directory with configurable filename prefix.",
            short_description="Saves image list to an output folder.",
            is_experimental=True,
            is_output_node=True,
            is_input_list=True,  # Receive images as list

@@ -253,8 +247,6 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
            node_id="SaveImageTextDataSetToFolder",
            display_name="Save Image and Text Dataset to Folder",
            category="dataset",
            description="Saves paired images and text captions to a named folder in the output directory with configurable filename prefix.",
            short_description="Saves paired images and text to output folder.",
            is_experimental=True,
            is_output_node=True,
            is_input_list=True,  # Receive both images and texts as lists

@@ -409,8 +401,6 @@ class ImageProcessingNode(io.ComfyNode):
        return io.Schema(
            node_id=cls.node_id,
            display_name=cls.display_name or cls.node_id,
            description=getattr(cls, 'description', ''),
            short_description=getattr(cls, 'short_description', ''),
            category="dataset/image",
            is_experimental=True,
            is_input_list=is_group,  # True for group, False for individual

@@ -560,8 +550,6 @@ class TextProcessingNode(io.ComfyNode):
        return io.Schema(
            node_id=cls.node_id,
            display_name=cls.display_name or cls.node_id,
            description=getattr(cls, 'description', ''),
            short_description=getattr(cls, 'short_description', ''),
            category="dataset/text",
            is_experimental=True,
            is_input_list=is_group,  # True for group, False for individual

@@ -639,7 +627,6 @@ class ResizeImagesByShorterEdgeNode(ImageProcessingNode):
    node_id = "ResizeImagesByShorterEdge"
    display_name = "Resize Images by Shorter Edge"
    description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio."
    short_description = "Resizes images by shorter edge preserving aspect ratio."
    extra_inputs = [
        io.Int.Input(
            "shorter_edge",

@@ -668,7 +655,6 @@ class ResizeImagesByLongerEdgeNode(ImageProcessingNode):
    node_id = "ResizeImagesByLongerEdge"
    display_name = "Resize Images by Longer Edge"
    description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio."
    short_description = "Resizes images by longer edge preserving aspect ratio."
    extra_inputs = [
        io.Int.Input(
            "longer_edge",

@@ -700,7 +686,6 @@ class CenterCropImagesNode(ImageProcessingNode):
    node_id = "CenterCropImages"
    display_name = "Center Crop Images"
    description = "Center crop all images to the specified dimensions."
    short_description = None
    extra_inputs = [
        io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
        io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),

@@ -723,7 +708,6 @@ class RandomCropImagesNode(ImageProcessingNode):
    description = (
        "Randomly crop all images to the specified dimensions (for data augmentation)."
    )
    short_description = None
    extra_inputs = [
        io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."),
        io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."),

@@ -750,7 +734,6 @@ class NormalizeImagesNode(ImageProcessingNode):
    node_id = "NormalizeImages"
    display_name = "Normalize Images"
    description = "Normalize images using mean and standard deviation."
    short_description = None
    extra_inputs = [
        io.Float.Input(
            "mean",

@@ -777,7 +760,6 @@ class AdjustBrightnessNode(ImageProcessingNode):
    node_id = "AdjustBrightness"
    display_name = "Adjust Brightness"
    description = "Adjust brightness of all images."
    short_description = None
    extra_inputs = [
        io.Float.Input(
            "factor",

@@ -797,7 +779,6 @@ class AdjustContrastNode(ImageProcessingNode):
    node_id = "AdjustContrast"
    display_name = "Adjust Contrast"
    description = "Adjust contrast of all images."
    short_description = None
    extra_inputs = [
        io.Float.Input(
            "factor",

@@ -817,7 +798,6 @@ class ShuffleDatasetNode(ImageProcessingNode):
    node_id = "ShuffleDataset"
    display_name = "Shuffle Image Dataset"
    description = "Randomly shuffle the order of images in the dataset."
    short_description = None
    is_group_process = True  # Requires full list to shuffle
    extra_inputs = [
        io.Int.Input(

@@ -841,8 +821,6 @@ class ShuffleImageTextDatasetNode(io.ComfyNode):
            node_id="ShuffleImageTextDataset",
            display_name="Shuffle Image-Text Dataset",
            category="dataset/image",
            description="Randomly shuffles paired image and text lists together using a seed, preserving their correspondence.",
            short_description="Shuffles paired image-text lists together.",
            is_experimental=True,
            is_input_list=True,
            inputs=[

@@ -885,7 +863,6 @@ class TextToLowercaseNode(TextProcessingNode):
    node_id = "TextToLowercase"
    display_name = "Text to Lowercase"
    description = "Convert all texts to lowercase."
    short_description = None

    @classmethod
    def _process(cls, text):

@@ -896,7 +873,6 @@ class TextToUppercaseNode(TextProcessingNode):
    node_id = "TextToUppercase"
    display_name = "Text to Uppercase"
    description = "Convert all texts to uppercase."
    short_description = None

    @classmethod
    def _process(cls, text):

@@ -907,7 +883,6 @@ class TruncateTextNode(TextProcessingNode):
    node_id = "TruncateText"
    display_name = "Truncate Text"
    description = "Truncate all texts to a maximum length."
    short_description = None
    extra_inputs = [
        io.Int.Input(
            "max_length", default=77, min=1, max=10000, tooltip="Maximum text length."

@@ -923,7 +898,6 @@ class AddTextPrefixNode(TextProcessingNode):
    node_id = "AddTextPrefix"
    display_name = "Add Text Prefix"
    description = "Add a prefix to all texts."
    short_description = None
    extra_inputs = [
        io.String.Input("prefix", default="", tooltip="Prefix to add."),
    ]

@@ -937,7 +911,6 @@ class AddTextSuffixNode(TextProcessingNode):
    node_id = "AddTextSuffix"
    display_name = "Add Text Suffix"
    description = "Add a suffix to all texts."
    short_description = None
    extra_inputs = [
        io.String.Input("suffix", default="", tooltip="Suffix to add."),
    ]

@@ -951,7 +924,6 @@ class ReplaceTextNode(TextProcessingNode):
    node_id = "ReplaceText"
    display_name = "Replace Text"
    description = "Replace text in all texts."
    short_description = None
    extra_inputs = [
        io.String.Input("find", default="", tooltip="Text to find."),
        io.String.Input("replace", default="", tooltip="Text to replace with."),

@@ -966,7 +938,6 @@ class StripWhitespaceNode(TextProcessingNode):
    node_id = "StripWhitespace"
    display_name = "Strip Whitespace"
    description = "Strip leading and trailing whitespace from all texts."
    short_description = None

    @classmethod
    def _process(cls, text):

@@ -982,7 +953,6 @@ class ImageDeduplicationNode(ImageProcessingNode):
    node_id = "ImageDeduplication"
    display_name = "Image Deduplication"
    description = "Remove duplicate or very similar images from the dataset."
    short_description = None
    is_group_process = True  # Requires full list to compare images
    extra_inputs = [
        io.Float.Input(

@@ -1053,7 +1023,6 @@ class ImageGridNode(ImageProcessingNode):
    node_id = "ImageGrid"
    display_name = "Image Grid"
    description = "Arrange multiple images into a grid layout."
    short_description = None
    is_group_process = True  # Requires full list to create grid
    is_output_list = False  # Outputs single grid image
    extra_inputs = [

@@ -1128,7 +1097,6 @@ class MergeImageListsNode(ImageProcessingNode):
    node_id = "MergeImageLists"
    display_name = "Merge Image Lists"
    description = "Concatenate multiple image lists into one."
    short_description = None
    is_group_process = True  # Receives images as list

    @classmethod

@@ -1146,7 +1114,6 @@ class MergeTextListsNode(TextProcessingNode):
    node_id = "MergeTextLists"
    display_name = "Merge Text Lists"
    description = "Concatenate multiple text lists into one."
    short_description = None
    is_group_process = True  # Receives texts as list

    @classmethod

@@ -1170,8 +1137,6 @@ class ResolutionBucket(io.ComfyNode):
            node_id="ResolutionBucket",
            display_name="Resolution Bucket",
            category="dataset",
            description="Groups latents and conditioning by resolution into batched buckets for efficient training with mixed aspect ratios.",
            short_description="Groups latents by resolution into training buckets.",
            is_experimental=True,
            is_input_list=True,
            inputs=[

@@ -1265,8 +1230,6 @@ class MakeTrainingDataset(io.ComfyNode):
            search_aliases=["encode dataset"],
            display_name="Make Training Dataset",
            category="dataset",
            description="Encodes images with a VAE and text captions with CLIP to create paired latent and conditioning training data.",
            short_description="Encodes images and text into training data.",
            is_experimental=True,
            is_input_list=True,  # images and texts as lists
            inputs=[

@@ -1353,8 +1316,6 @@ class SaveTrainingDataset(io.ComfyNode):
            search_aliases=["export training data"],
            display_name="Save Training Dataset",
            category="dataset",
            description="Saves encoded latent and conditioning training data to disk in sharded files with configurable shard size.",
            short_description="Saves encoded training data to sharded files.",
            is_experimental=True,
            is_output_node=True,
            is_input_list=True,  # Receive lists

@@ -1456,8 +1417,6 @@ class LoadTrainingDataset(io.ComfyNode):
            search_aliases=["import dataset", "training data"],
            display_name="Load Training Dataset",
            category="dataset",
            description="Loads a previously saved training dataset of latents and conditioning from sharded files on disk.",
            short_description="Loads saved training dataset from disk.",
            is_experimental=True,
            inputs=[
                io.String.Input(

@@ -14,8 +14,6 @@ class DifferentialDiffusion(io.ComfyNode):
            search_aliases=["inpaint gradient", "variable denoise strength"],
            display_name="Differential Diffusion",
            category="_for_testing",
            description="Enables per-pixel variable denoise strength using a mask, where mask intensity controls how much each region is denoised during sampling.",
            short_description="Per-pixel variable denoise strength via mask.",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input(

@@ -363,7 +363,6 @@ class EasyCacheNode(io.ComfyNode):
            node_id="EasyCache",
            display_name="EasyCache",
            description="Native EasyCache implementation.",
            short_description=None,
            category="advanced/debug/model",
            is_experimental=True,
            inputs=[

@@ -497,7 +496,6 @@ class LazyCacheNode(io.ComfyNode):
            node_id="LazyCache",
            display_name="LazyCache",
            description="A homebrew version of EasyCache - an even 'easier' version of EasyCache to implement. Overall it works worse than EasyCache, but it is better in some rare cases and universally compatible with everything in ComfyUI.",
            short_description="Simpler EasyCache alternative with universal ComfyUI compatibility.",
            category="advanced/debug/model",
            is_experimental=True,
            inputs=[

@@ -10,7 +10,6 @@ class ReferenceLatent(io.ComfyNode):
            node_id="ReferenceLatent",
            category="advanced/conditioning/edit_models",
            description="This node sets the guiding latent for an edit model. If the model supports it, you can chain multiple of these nodes to set multiple reference images.",
            short_description="Sets guiding latent for edit models with chaining support.",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Latent.Input("latent", optional=True),

@@ -19,8 +19,6 @@ class EpsilonScaling(io.ComfyNode):
        return io.Schema(
            node_id="Epsilon Scaling",
            category="model_patches/unet",
            description="Applies epsilon scaling to mitigate exposure bias in diffusion models by scaling the predicted noise after CFG, improving sample quality.",
            short_description="Scale predicted noise to reduce exposure bias.",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input(

@@ -123,7 +121,6 @@ class TemporalScoreRescaling(io.ComfyNode):
                "TSR - Temporal Score Rescaling (2510.01184)\n\n"
                "Rescaling the model's score or noise to steer the sampling diversity.\n"
            ),
            short_description="Rescales temporal scores to control sampling diversity.",
        )

    @classmethod

@@ -13,8 +13,6 @@ class CLIPTextEncodeFlux(io.ComfyNode):
        return io.Schema(
            node_id="CLIPTextEncodeFlux",
            category="advanced/conditioning/flux",
            description="Encodes separate CLIP-L and T5-XXL text prompts with a guidance value into Flux conditioning.",
            short_description="Encodes CLIP-L and T5-XXL prompts for Flux.",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

@@ -42,8 +40,6 @@ class EmptyFlux2LatentImage(io.ComfyNode):
            node_id="EmptyFlux2LatentImage",
            display_name="Empty Flux 2 Latent",
            category="latent",
            description="Creates an empty Flux 2 latent image tensor with the specified width, height, and batch size.",
            short_description="Creates an empty Flux 2 latent image tensor.",
            inputs=[
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),

@@ -65,8 +61,6 @@ class FluxGuidance(io.ComfyNode):
        return io.Schema(
            node_id="FluxGuidance",
            category="advanced/conditioning/flux",
            description="Sets the guidance strength value on Flux conditioning to control how closely generation follows the prompt.",
            short_description="Sets guidance strength on Flux conditioning.",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),

@@ -91,7 +85,6 @@ class FluxDisableGuidance(io.ComfyNode):
            node_id="FluxDisableGuidance",
            category="advanced/conditioning/flux",
            description="This node completely disables the guidance embed on Flux and Flux-like models.",
            short_description="Disables guidance embed on Flux and Flux-like models.",
            inputs=[
                io.Conditioning.Input("conditioning"),
            ],

@@ -136,7 +129,6 @@ class FluxKontextImageScale(io.ComfyNode):
            node_id="FluxKontextImageScale",
            category="advanced/conditioning/flux",
            description="This node resizes the image to one that is more optimal for Flux Kontext.",
            short_description="Resizes images to optimal dimensions for Flux Kontext.",
            inputs=[
                io.Image.Input("image"),
            ],

@@ -164,8 +156,6 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode):
            node_id="FluxKontextMultiReferenceLatentMethod",
            display_name="Edit Model Reference Method",
            category="advanced/conditioning/flux",
            description="Selects the method used for handling multiple reference latents in Flux Kontext edit models.",
            short_description="Selects reference latent method for Flux Kontext.",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Combo.Input(

@@ -224,8 +214,6 @@ class Flux2Scheduler(io.ComfyNode):
        return io.Schema(
            node_id="Flux2Scheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a sigma schedule for Flux 2 sampling based on step count and image resolution.",
            short_description="Generates a sigma schedule for Flux 2 sampling.",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=4096),
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1),

@@ -30,8 +30,6 @@ class FreeU(IO.ComfyNode):
        return IO.Schema(
            node_id="FreeU",
            category="model_patches/unet",
            description="Applies FreeU v1 to a UNet model, boosting backbone features and filtering skip connections using Fourier transforms for improved quality.",
            short_description="Applies FreeU v1 backbone boost and skip filtering.",
            inputs=[
                IO.Model.Input("model"),
                IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),

@@ -79,8 +77,6 @@ class FreeU_V2(IO.ComfyNode):
        return IO.Schema(
            node_id="FreeU_V2",
            category="model_patches/unet",
            description="Applies FreeU v2 to a UNet model with adaptive backbone scaling based on hidden state magnitude and Fourier skip filtering.",
            short_description="Applies FreeU v2 with adaptive scaling.",
            inputs=[
                IO.Model.Input("model"),
                IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),

@@ -62,7 +62,6 @@ class FreSca(io.ComfyNode):
            display_name="FreSca",
            category="_for_testing",
            description="Applies frequency-dependent scaling to the guidance.",
            short_description=None,
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,

@@ -341,8 +341,6 @@ class GITSScheduler(io.ComfyNode):
        return io.Schema(
            node_id="GITSScheduler",
            category="sampling/custom_sampling/schedulers",
            description="Generates a noise schedule using the GITS method with precomputed optimal sigma levels and configurable coefficient.",
            short_description="Generates a GITS noise schedule with optimal sigma levels.",
            inputs=[
                io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
                io.Int.Input("steps", default=10, min=2, max=1000),
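
Taken together, the hunks above apply one recurring change: each node's io.Schema gains a description and, where a shorter label is useful, a short_description. A minimal sketch of the resulting schema shape (hypothetical node, using only fields visible in the hunks above):

class ExampleNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleNode",  # hypothetical id
            display_name="Example Node",
            category="utils",
            description="Full-sentence summary shown in detailed UI surfaces.",
            short_description="One-line summary for search and tooltips.",
            inputs=[io.Image.Input("image")],
            outputs=[io.Image.Output()],
        )
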
897
comfy_extras/nodes_glsl.py
Normal file
@@ -0,0 +1,897 @@
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import logging
|
||||
import ctypes.util
|
||||
import importlib.util
|
||||
from typing import TypedDict
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
import nodes
|
||||
from comfy_api.latest import ComfyExtension, io, ui
|
||||
from typing_extensions import override
|
||||
from utils.install_util import get_missing_requirements_message
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _check_opengl_availability():
|
||||
"""Early check for OpenGL availability. Raises RuntimeError if unlikely to work."""
|
||||
logger.debug("_check_opengl_availability: starting")
|
||||
missing = []
|
||||
|
||||
# Check Python packages (using find_spec to avoid importing)
|
||||
logger.debug("_check_opengl_availability: checking for glfw package")
|
||||
if importlib.util.find_spec("glfw") is None:
|
||||
missing.append("glfw")
|
||||
|
||||
logger.debug("_check_opengl_availability: checking for OpenGL package")
|
||||
if importlib.util.find_spec("OpenGL") is None:
|
||||
missing.append("PyOpenGL")
|
||||
|
||||
if missing:
|
||||
raise RuntimeError(
|
||||
f"OpenGL dependencies not available.\n{get_missing_requirements_message()}\n"
|
||||
)
|
||||
|
||||
# On Linux without display, check if headless backends are available
|
||||
logger.debug(f"_check_opengl_availability: platform={sys.platform}")
|
||||
if sys.platform.startswith("linux"):
|
||||
has_display = os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")
|
||||
logger.debug(f"_check_opengl_availability: has_display={bool(has_display)}")
|
||||
if not has_display:
|
||||
# Check for EGL or OSMesa libraries
|
||||
logger.debug("_check_opengl_availability: checking for EGL library")
|
||||
has_egl = ctypes.util.find_library("EGL")
|
||||
logger.debug("_check_opengl_availability: checking for OSMesa library")
|
||||
has_osmesa = ctypes.util.find_library("OSMesa")
|
||||
|
||||
# Error disabled for CI as it fails this check
|
||||
# if not has_egl and not has_osmesa:
|
||||
# raise RuntimeError(
|
||||
# "GLSL Shader node: No display and no headless backend (EGL/OSMesa) found.\n"
|
||||
# "See error below for installation instructions."
|
||||
# )
|
||||
logger.debug(f"Headless mode: EGL={'yes' if has_egl else 'no'}, OSMesa={'yes' if has_osmesa else 'no'}")
|
||||
|
||||
logger.debug("_check_opengl_availability: completed")
|
||||
|
||||
|
||||
# Run early check at import time
|
||||
logger.debug("nodes_glsl: running _check_opengl_availability at import time")
|
||||
_check_opengl_availability()
|
||||
|
||||
# OpenGL modules - initialized lazily when context is created
|
||||
gl = None
|
||||
glfw = None
|
||||
EGL = None
|
||||
|
||||
|
||||
def _import_opengl():
|
||||
"""Import OpenGL module. Called after context is created."""
|
||||
global gl
|
||||
if gl is None:
|
||||
logger.debug("_import_opengl: importing OpenGL.GL")
|
||||
import OpenGL.GL as _gl
|
||||
gl = _gl
|
||||
logger.debug("_import_opengl: import completed")
|
||||
return gl
|
||||
|
||||
|
||||
class SizeModeInput(TypedDict):
|
||||
size_mode: str
|
||||
width: int
|
||||
height: int
|
||||
|
||||
|
||||
MAX_IMAGES = 5 # u_image0-4
|
||||
MAX_UNIFORMS = 5 # u_float0-4, u_int0-4
|
||||
MAX_OUTPUTS = 4 # fragColor0-3 (MRT)
|
||||
|
||||
# Vertex shader using gl_VertexID trick - no VBO needed.
|
||||
# Draws a single triangle that covers the entire screen:
|
||||
#
|
||||
# (-1,3)
|
||||
# /|
|
||||
# / | <- visible area is the unit square from (-1,-1) to (1,1)
|
||||
# / | parts outside get clipped away
|
||||
# (-1,-1)---(3,-1)
|
||||
#
|
||||
# v_texCoord is computed from clip space: * 0.5 + 0.5 maps (-1,1) -> (0,1)
|
||||
VERTEX_SHADER = """#version 330 core
|
||||
out vec2 v_texCoord;
|
||||
void main() {
|
||||
vec2 verts[3] = vec2[](vec2(-1, -1), vec2(3, -1), vec2(-1, 3));
|
||||
v_texCoord = verts[gl_VertexID] * 0.5 + 0.5;
|
||||
gl_Position = vec4(verts[gl_VertexID], 0, 1);
|
||||
}
|
||||
"""

DEFAULT_FRAGMENT_SHADER = """#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec2 u_resolution;

in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;

void main() {
    fragColor0 = texture(u_image0, v_texCoord);
}
"""


def _convert_es_to_desktop(source: str) -> str:
    """Convert GLSL ES (WebGL) shader source to desktop GLSL 330 core."""
    # Remove any existing #version directive
    source = re.sub(r"#version\s+\d+(\s+es)?\s*\n?", "", source, flags=re.IGNORECASE)
    # Remove precision qualifiers (not needed in desktop GLSL)
    source = re.sub(r"precision\s+(lowp|mediump|highp)\s+\w+\s*;\s*\n?", "", source)
    # Prepend desktop GLSL version
    return "#version 330 core\n" + source


def _detect_output_count(source: str) -> int:
    """Detect how many fragColor outputs are used in the shader.

    Returns the count of outputs needed (1 to MAX_OUTPUTS).
    """
    matches = re.findall(r"fragColor(\d+)", source)
    if not matches:
        return 1  # Default to 1 output if none found
    max_index = max(int(m) for m in matches)
    return min(max_index + 1, MAX_OUTPUTS)
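
# Example (illustrative): a shader writing to fragColor0 and fragColor2 yields
# a count of 3, since the highest index referenced is 2.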


def _detect_pass_count(source: str) -> int:
    """Detect multi-pass rendering from #pragma passes N directive.

    Returns the number of passes (1 if not specified).
    """
    match = re.search(r"#pragma\s+passes\s+(\d+)", source)
    if match:
        return max(1, int(match.group(1)))
    return 1
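
# Example (illustrative): a shader containing the line
#     #pragma passes 3
# is rendered three times per batch; u_pass is 0, 1, 2 on successive passes,
# and every pass after the first samples the previous pass's output through
# the sampler bound to texture unit 0 (u_image0).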


def _init_glfw():
    """Initialize GLFW. Returns (window, glfw_module). Raises RuntimeError on failure."""
    logger.debug("_init_glfw: starting")
    # On macOS, glfw.init() must be called from the main thread or it hangs forever
    if sys.platform == "darwin":
        logger.debug("_init_glfw: skipping on macOS")
        raise RuntimeError("GLFW backend not supported on macOS")

    logger.debug("_init_glfw: importing glfw module")
    import glfw as _glfw

    logger.debug("_init_glfw: calling glfw.init()")
    if not _glfw.init():
        raise RuntimeError("glfw.init() failed")

    try:
        logger.debug("_init_glfw: setting window hints")
        _glfw.window_hint(_glfw.VISIBLE, _glfw.FALSE)
        _glfw.window_hint(_glfw.CONTEXT_VERSION_MAJOR, 3)
        _glfw.window_hint(_glfw.CONTEXT_VERSION_MINOR, 3)
        _glfw.window_hint(_glfw.OPENGL_PROFILE, _glfw.OPENGL_CORE_PROFILE)

        logger.debug("_init_glfw: calling create_window()")
        window = _glfw.create_window(64, 64, "ComfyUI GLSL", None, None)
        if not window:
            raise RuntimeError("glfw.create_window() failed")

        logger.debug("_init_glfw: calling make_context_current()")
        _glfw.make_context_current(window)
        logger.debug("_init_glfw: completed successfully")
        return window, _glfw
    except Exception:
        logger.debug("_init_glfw: failed, terminating glfw")
        _glfw.terminate()
        raise
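
# Note: the hidden 64x64 GLFW window only provides a current context; actual
# rendering happens in FBOs sized to the requested output, not in this window.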


def _init_egl():
    """Initialize EGL for headless rendering. Returns (display, context, surface, EGL_module). Raises RuntimeError on failure."""
    logger.debug("_init_egl: starting")
    from OpenGL import EGL as _EGL
    from OpenGL.EGL import (
        eglGetDisplay, eglInitialize, eglChooseConfig, eglCreateContext,
        eglMakeCurrent, eglCreatePbufferSurface, eglBindAPI,
        eglTerminate, eglDestroyContext, eglDestroySurface,
        EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, EGL_NONE,
        EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
        EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_BLUE_SIZE, EGL_ALPHA_SIZE, EGL_DEPTH_SIZE,
        EGL_WIDTH, EGL_HEIGHT, EGL_OPENGL_API,
    )
    logger.debug("_init_egl: imports completed")

    display = None
    context = None
    surface = None

    try:
        logger.debug("_init_egl: calling eglGetDisplay()")
        display = eglGetDisplay(EGL_DEFAULT_DISPLAY)
        if display == _EGL.EGL_NO_DISPLAY:
            raise RuntimeError("eglGetDisplay() failed")

        logger.debug("_init_egl: calling eglInitialize()")
        major, minor = _EGL.EGLint(), _EGL.EGLint()
        if not eglInitialize(display, major, minor):
            display = None  # Not initialized, don't terminate
            raise RuntimeError("eglInitialize() failed")
        logger.debug(f"_init_egl: EGL version {major.value}.{minor.value}")

        config_attribs = [
            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
            EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
            EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8,
            EGL_DEPTH_SIZE, 0, EGL_NONE
        ]
        configs = (_EGL.EGLConfig * 1)()
        num_configs = _EGL.EGLint()
        if not eglChooseConfig(display, config_attribs, configs, 1, num_configs) or num_configs.value == 0:
            raise RuntimeError("eglChooseConfig() failed")
        config = configs[0]
        logger.debug(f"_init_egl: config chosen, num_configs={num_configs.value}")

        if not eglBindAPI(EGL_OPENGL_API):
            raise RuntimeError("eglBindAPI() failed")

        logger.debug("_init_egl: calling eglCreateContext()")
        context_attribs = [
            _EGL.EGL_CONTEXT_MAJOR_VERSION, 3,
            _EGL.EGL_CONTEXT_MINOR_VERSION, 3,
            _EGL.EGL_CONTEXT_OPENGL_PROFILE_MASK, _EGL.EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT,
            EGL_NONE
        ]
        context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attribs)
        if context == EGL_NO_CONTEXT:
            raise RuntimeError("eglCreateContext() failed")

        logger.debug("_init_egl: calling eglCreatePbufferSurface()")
        # A tiny pbuffer is enough: rendering goes to an FBO, the surface only
        # has to make the context current.
        pbuffer_attribs = [EGL_WIDTH, 64, EGL_HEIGHT, 64, EGL_NONE]
        surface = eglCreatePbufferSurface(display, config, pbuffer_attribs)
        if surface == _EGL.EGL_NO_SURFACE:
            raise RuntimeError("eglCreatePbufferSurface() failed")

        logger.debug("_init_egl: calling eglMakeCurrent()")
        if not eglMakeCurrent(display, surface, surface, context):
            raise RuntimeError("eglMakeCurrent() failed")

        logger.debug("_init_egl: completed successfully")
        return display, context, surface, _EGL

    except Exception:
        logger.debug("_init_egl: failed, cleaning up")
        # Clean up any resources on failure
        if surface is not None:
            eglDestroySurface(display, surface)
        if context is not None:
            eglDestroyContext(display, context)
        if display is not None:
            eglTerminate(display)
        raise


def _init_osmesa():
    """Initialize OSMesa for software rendering. Returns (context, buffer). Raises RuntimeError on failure."""
    import ctypes

    logger.debug("_init_osmesa: starting")
    os.environ["PYOPENGL_PLATFORM"] = "osmesa"

    logger.debug("_init_osmesa: importing OpenGL.osmesa")
    from OpenGL import GL as _gl
    from OpenGL.osmesa import (
        OSMesaCreateContextExt, OSMesaMakeCurrent, OSMesaDestroyContext,
        OSMESA_RGBA,
    )
    logger.debug("_init_osmesa: imports completed")

    ctx = OSMesaCreateContextExt(OSMESA_RGBA, 24, 0, 0, None)
    if not ctx:
        raise RuntimeError("OSMesaCreateContextExt() failed")

    width, height = 64, 64
    buffer = (ctypes.c_ubyte * (width * height * 4))()

    logger.debug("_init_osmesa: calling OSMesaMakeCurrent()")
    if not OSMesaMakeCurrent(ctx, buffer, _gl.GL_UNSIGNED_BYTE, width, height):
        OSMesaDestroyContext(ctx)
        raise RuntimeError("OSMesaMakeCurrent() failed")

    logger.debug("_init_osmesa: completed successfully")
    return ctx, buffer
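
# Note: the 64x64 OSMesa buffer exists only to satisfy OSMesaMakeCurrent();
# shader output is read back from FBO-attached textures, not from this buffer.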


class GLContext:
    """Manages OpenGL context and resources for shader execution.

    Tries backends in order: GLFW (desktop) → EGL (headless GPU) → OSMesa (software).
    """

    _instance = None
    _initialized = False

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        if GLContext._initialized:
            logger.debug("GLContext.__init__: already initialized, skipping")
            return

        logger.debug("GLContext.__init__: starting initialization")

        global glfw, EGL

        import time
        start = time.perf_counter()

        self._backend = None
        self._window = None
        self._egl_display = None
        self._egl_context = None
        self._egl_surface = None
        self._osmesa_ctx = None
        self._osmesa_buffer = None
        self._vao = None

        # Try backends in order: GLFW → EGL → OSMesa
        errors = []

        logger.debug("GLContext.__init__: trying GLFW backend")
        try:
            self._window, glfw = _init_glfw()
            self._backend = "glfw"
            logger.debug("GLContext.__init__: GLFW backend succeeded")
        except Exception as e:
            logger.debug(f"GLContext.__init__: GLFW backend failed: {e}")
            errors.append(("GLFW", e))

        if self._backend is None:
            logger.debug("GLContext.__init__: trying EGL backend")
            try:
                self._egl_display, self._egl_context, self._egl_surface, EGL = _init_egl()
                self._backend = "egl"
                logger.debug("GLContext.__init__: EGL backend succeeded")
            except Exception as e:
                logger.debug(f"GLContext.__init__: EGL backend failed: {e}")
                errors.append(("EGL", e))

        if self._backend is None:
            logger.debug("GLContext.__init__: trying OSMesa backend")
            try:
                self._osmesa_ctx, self._osmesa_buffer = _init_osmesa()
                self._backend = "osmesa"
                logger.debug("GLContext.__init__: OSMesa backend succeeded")
            except Exception as e:
                logger.debug(f"GLContext.__init__: OSMesa backend failed: {e}")
                errors.append(("OSMesa", e))

        if self._backend is None:
            if sys.platform == "win32":
                platform_help = (
                    "Windows: Ensure GPU drivers are installed and display is available.\n"
                    "  CPU-only/headless mode is not supported on Windows."
                )
            elif sys.platform == "darwin":
                platform_help = (
                    "macOS: GLFW is not supported.\n"
                    "  Install OSMesa via Homebrew: brew install mesa\n"
                    "  Then: pip install PyOpenGL PyOpenGL-accelerate"
                )
            else:
                platform_help = (
                    "Linux: Install one of these backends:\n"
                    "  Desktop: sudo apt install libgl1-mesa-glx libglfw3\n"
                    "  Headless with GPU: sudo apt install libegl1-mesa libgl1-mesa-dri\n"
                    "  Headless (CPU): sudo apt install libosmesa6"
                )

            error_details = "\n".join(f"  {name}: {err}" for name, err in errors)
            raise RuntimeError(
                f"Failed to create OpenGL context.\n\n"
                f"Backend errors:\n{error_details}\n\n"
                f"{platform_help}"
            )

        # Now import OpenGL.GL (after context is current)
        logger.debug("GLContext.__init__: importing OpenGL.GL")
        _import_opengl()

        # Create VAO (required for core profile, but OSMesa may use compat profile)
        logger.debug("GLContext.__init__: creating VAO")
        vao = None  # Initialized up front so the except block never sees an unbound name
        try:
            vao = gl.glGenVertexArrays(1)
            gl.glBindVertexArray(vao)
            self._vao = vao  # Only store after successful bind
            logger.debug("GLContext.__init__: VAO created successfully")
        except Exception as e:
            logger.debug(f"GLContext.__init__: VAO creation failed (may be expected for OSMesa): {e}")
            # OSMesa with older Mesa may not support VAOs
            # Clean up if we created but couldn't bind
            if vao:
                try:
                    gl.glDeleteVertexArrays(1, [vao])
                except Exception:
                    pass

        elapsed = (time.perf_counter() - start) * 1000

        # Log device info
        renderer = gl.glGetString(gl.GL_RENDERER)
        vendor = gl.glGetString(gl.GL_VENDOR)
        version = gl.glGetString(gl.GL_VERSION)
        renderer = renderer.decode() if renderer else "Unknown"
        vendor = vendor.decode() if vendor else "Unknown"
        version = version.decode() if version else "Unknown"

        GLContext._initialized = True
        logger.info(f"GLSL context initialized in {elapsed:.1f}ms ({self._backend}) - {renderer} ({vendor}), GL {version}")

    def make_current(self):
        if self._backend == "glfw":
            glfw.make_context_current(self._window)
        elif self._backend == "egl":
            from OpenGL.EGL import eglMakeCurrent
            eglMakeCurrent(self._egl_display, self._egl_surface, self._egl_surface, self._egl_context)
        elif self._backend == "osmesa":
            from OpenGL.osmesa import OSMesaMakeCurrent
            OSMesaMakeCurrent(self._osmesa_ctx, self._osmesa_buffer, gl.GL_UNSIGNED_BYTE, 64, 64)

        if self._vao is not None:
            gl.glBindVertexArray(self._vao)
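

# Illustrative usage (GLContext is a process-wide singleton, so construction
# is cheap after the first call):
#   ctx = GLContext()    # picks the first backend that works
#   ctx.make_current()   # rebind before issuing GL calls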


def _compile_shader(source: str, shader_type: int) -> int:
    """Compile a shader and return its ID."""
    shader = gl.glCreateShader(shader_type)
    gl.glShaderSource(shader, source)
    gl.glCompileShader(shader)

    if gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS) != gl.GL_TRUE:
        error = gl.glGetShaderInfoLog(shader).decode()
        gl.glDeleteShader(shader)
        raise RuntimeError(f"Shader compilation failed:\n{error}")

    return shader


def _create_program(vertex_source: str, fragment_source: str) -> int:
    """Create and link a shader program."""
    vertex_shader = _compile_shader(vertex_source, gl.GL_VERTEX_SHADER)
    try:
        fragment_shader = _compile_shader(fragment_source, gl.GL_FRAGMENT_SHADER)
    except RuntimeError:
        gl.glDeleteShader(vertex_shader)
        raise

    program = gl.glCreateProgram()
    gl.glAttachShader(program, vertex_shader)
    gl.glAttachShader(program, fragment_shader)
    gl.glLinkProgram(program)

    gl.glDeleteShader(vertex_shader)
    gl.glDeleteShader(fragment_shader)

    if gl.glGetProgramiv(program, gl.GL_LINK_STATUS) != gl.GL_TRUE:
        error = gl.glGetProgramInfoLog(program).decode()
        gl.glDeleteProgram(program)
        raise RuntimeError(f"Program linking failed:\n{error}")

    return program
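
# Illustrative sketch of the compile path used below (assumes a current
# context):
#   desktop_src = _convert_es_to_desktop(DEFAULT_FRAGMENT_SHADER)
#   program = _create_program(VERTEX_SHADER, desktop_src)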


def _render_shader_batch(
    fragment_code: str,
    width: int,
    height: int,
    image_batches: list[list[np.ndarray]],
    floats: list[float],
    ints: list[int],
) -> list[list[np.ndarray]]:
    """
    Render a fragment shader for multiple batches efficiently.

    Compiles shader once, reuses framebuffer/textures across batches.
    Supports multi-pass rendering via #pragma passes N directive.

    Args:
        fragment_code: User's fragment shader code
        width: Output width
        height: Output height
        image_batches: List of batches, each batch is a list of input images (H, W, C) float32 [0,1]
        floats: List of float uniforms
        ints: List of int uniforms

    Returns:
        List of batch outputs, each is a list of output images (H, W, 4) float32 [0,1]
    """
    import time
    start_time = time.perf_counter()

    if not image_batches:
        return []

    ctx = GLContext()
    ctx.make_current()

    # Convert from GLSL ES to desktop GLSL 330
    fragment_source = _convert_es_to_desktop(fragment_code)

    # Detect how many outputs the shader actually uses
    num_outputs = _detect_output_count(fragment_code)

    # Detect multi-pass rendering
    num_passes = _detect_pass_count(fragment_code)

    # Track resources for cleanup
    program = None
    fbo = None
    output_textures = []
    input_textures = []
    ping_pong_textures = []
    ping_pong_fbos = []

    num_inputs = len(image_batches[0])

    try:
        # Compile shaders (once for all batches)
        try:
            program = _create_program(VERTEX_SHADER, fragment_source)
        except RuntimeError:
            logger.error(f"Fragment shader:\n{fragment_source}")
            raise

        gl.glUseProgram(program)

        # Create framebuffer with only the needed color attachments
        fbo = gl.glGenFramebuffers(1)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)

        draw_buffers = []
        for i in range(num_outputs):
            tex = gl.glGenTextures(1)
            output_textures.append(tex)
            gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
            gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, width, height, 0, gl.GL_RGBA, gl.GL_FLOAT, None)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
            gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0 + i, gl.GL_TEXTURE_2D, tex, 0)
            draw_buffers.append(gl.GL_COLOR_ATTACHMENT0 + i)

        gl.glDrawBuffers(num_outputs, draw_buffers)

        if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
            raise RuntimeError("Framebuffer is not complete")

        # Create ping-pong resources for multi-pass rendering
        if num_passes > 1:
            for _ in range(2):
                pp_tex = gl.glGenTextures(1)
                ping_pong_textures.append(pp_tex)
                gl.glBindTexture(gl.GL_TEXTURE_2D, pp_tex)
                gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, width, height, 0, gl.GL_RGBA, gl.GL_FLOAT, None)
                gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
                gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
                gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
                gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)

                pp_fbo = gl.glGenFramebuffers(1)
                ping_pong_fbos.append(pp_fbo)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, pp_fbo)
                gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, pp_tex, 0)
                gl.glDrawBuffers(1, [gl.GL_COLOR_ATTACHMENT0])

                if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
                    raise RuntimeError("Ping-pong framebuffer is not complete")

        # Create input textures (reused for all batches)
        for i in range(num_inputs):
            tex = gl.glGenTextures(1)
            input_textures.append(tex)
            gl.glActiveTexture(gl.GL_TEXTURE0 + i)
            gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)

            loc = gl.glGetUniformLocation(program, f"u_image{i}")
            if loc >= 0:
                gl.glUniform1i(loc, i)

        # Set static uniforms (once for all batches)
        loc = gl.glGetUniformLocation(program, "u_resolution")
        if loc >= 0:
            gl.glUniform2f(loc, float(width), float(height))

        for i, v in enumerate(floats):
            loc = gl.glGetUniformLocation(program, f"u_float{i}")
            if loc >= 0:
                gl.glUniform1f(loc, v)

        for i, v in enumerate(ints):
            loc = gl.glGetUniformLocation(program, f"u_int{i}")
            if loc >= 0:
                gl.glUniform1i(loc, v)

        # Get u_pass uniform location for multi-pass
        pass_loc = gl.glGetUniformLocation(program, "u_pass")

        gl.glViewport(0, 0, width, height)
        gl.glDisable(gl.GL_BLEND)  # Ensure no alpha blending - write output directly

        # Process each batch
        all_batch_outputs = []
        for images in image_batches:
            # Update input textures with this batch's images
            for i, img in enumerate(images):
                gl.glActiveTexture(gl.GL_TEXTURE0 + i)
                gl.glBindTexture(gl.GL_TEXTURE_2D, input_textures[i])

                # Flip vertically for GL coordinates, ensure RGBA
                h, w, c = img.shape
                if c == 3:
                    img_upload = np.empty((h, w, 4), dtype=np.float32)
                    img_upload[:, :, :3] = img[::-1, :, :]
                    img_upload[:, :, 3] = 1.0
                else:
                    img_upload = np.ascontiguousarray(img[::-1, :, :])

                gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, w, h, 0, gl.GL_RGBA, gl.GL_FLOAT, img_upload)

            if num_passes == 1:
                # Single pass - render directly to output FBO
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
                if pass_loc >= 0:
                    gl.glUniform1i(pass_loc, 0)
                gl.glClearColor(0, 0, 0, 0)
                gl.glClear(gl.GL_COLOR_BUFFER_BIT)
                gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3)
            else:
                # Multi-pass rendering with ping-pong
                for p in range(num_passes):
                    is_last_pass = (p == num_passes - 1)

                    # Set pass uniform
                    if pass_loc >= 0:
                        gl.glUniform1i(pass_loc, p)

                    if is_last_pass:
                        # Last pass renders to the main output FBO
                        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
                    else:
                        # Intermediate passes render to ping-pong FBO
                        target_fbo = ping_pong_fbos[p % 2]
                        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, target_fbo)

                    # Set input texture for this pass
                    gl.glActiveTexture(gl.GL_TEXTURE0)
                    if p == 0:
                        # First pass reads from original input
                        gl.glBindTexture(gl.GL_TEXTURE_2D, input_textures[0])
                    else:
                        # Subsequent passes read from previous pass output
                        source_tex = ping_pong_textures[(p - 1) % 2]
                        gl.glBindTexture(gl.GL_TEXTURE_2D, source_tex)

                    gl.glClearColor(0, 0, 0, 0)
                    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
                    gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3)

            # Read back outputs for this batch
            # (glGetTexImage is synchronous, implicitly waits for rendering)
            batch_outputs = []
            for tex in output_textures:
                gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
                data = gl.glGetTexImage(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, gl.GL_FLOAT)
                img = np.frombuffer(data, dtype=np.float32).reshape(height, width, 4)
                batch_outputs.append(np.ascontiguousarray(img[::-1, :, :]))

            # Pad with black images for unused outputs
            black_img = np.zeros((height, width, 4), dtype=np.float32)
            for _ in range(num_outputs, MAX_OUTPUTS):
                batch_outputs.append(black_img)

            all_batch_outputs.append(batch_outputs)

        elapsed = (time.perf_counter() - start_time) * 1000
        num_batches = len(image_batches)
        pass_info = f", {num_passes} passes" if num_passes > 1 else ""
        logger.info(f"GLSL shader executed in {elapsed:.1f}ms ({num_batches} batch{'es' if num_batches != 1 else ''}, {width}x{height}{pass_info})")

        return all_batch_outputs

    finally:
        # Unbind before deleting
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glUseProgram(0)

        if input_textures:
            gl.glDeleteTextures(len(input_textures), input_textures)
        if output_textures:
            gl.glDeleteTextures(len(output_textures), output_textures)
        if ping_pong_textures:
            gl.glDeleteTextures(len(ping_pong_textures), ping_pong_textures)
        if fbo is not None:
            gl.glDeleteFramebuffers(1, [fbo])
        for pp_fbo in ping_pong_fbos:
            gl.glDeleteFramebuffers(1, [pp_fbo])
        if program is not None:
            gl.glDeleteProgram(program)
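

# Illustrative call (assumes float32 numpy inputs in [0, 1]):
#   img = np.zeros((256, 256, 3), dtype=np.float32)
#   outs = _render_shader_batch(DEFAULT_FRAGMENT_SHADER, 256, 256, [[img]], [], [])
#   # -> 1 batch entry containing MAX_OUTPUTS images of shape (256, 256, 4)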

class GLSLShader(io.ComfyNode):

    @classmethod
    def define_schema(cls) -> io.Schema:
        image_template = io.Autogrow.TemplatePrefix(
            io.Image.Input("image"),
            prefix="image",
            min=1,
            max=MAX_IMAGES,
        )

        float_template = io.Autogrow.TemplatePrefix(
            io.Float.Input("float", default=0.0),
            prefix="u_float",
            min=0,
            max=MAX_UNIFORMS,
        )

        int_template = io.Autogrow.TemplatePrefix(
            io.Int.Input("int", default=0),
            prefix="u_int",
            min=0,
            max=MAX_UNIFORMS,
        )

        return io.Schema(
            node_id="GLSLShader",
            display_name="GLSL Shader",
            category="image/shader",
            description=(
                f"Apply GLSL fragment shaders to images. "
                f"Inputs: u_image0-{MAX_IMAGES-1} (sampler2D), u_resolution (vec2), "
                f"u_float0-{MAX_UNIFORMS-1}, u_int0-{MAX_UNIFORMS-1}. "
                f"Outputs: layout(location = 0-{MAX_OUTPUTS-1}) out vec4 fragColor0-{MAX_OUTPUTS-1}."
            ),
            inputs=[
                io.String.Input(
                    "fragment_shader",
                    default=DEFAULT_FRAGMENT_SHADER,
                    multiline=True,
                    tooltip="GLSL fragment shader source code (GLSL ES 3.00 / WebGL 2.0 compatible)",
                ),
                io.DynamicCombo.Input(
                    "size_mode",
                    options=[
                        io.DynamicCombo.Option("from_input", []),
                        io.DynamicCombo.Option(
                            "custom",
                            [
                                io.Int.Input(
                                    "width",
                                    default=512,
                                    min=1,
                                    max=nodes.MAX_RESOLUTION,
                                ),
                                io.Int.Input(
                                    "height",
                                    default=512,
                                    min=1,
                                    max=nodes.MAX_RESOLUTION,
                                ),
                            ],
                        ),
                    ],
                    tooltip="Output size: 'from_input' uses first input image dimensions, 'custom' allows manual size",
                ),
                io.Autogrow.Input("images", template=image_template),
                io.Autogrow.Input("floats", template=float_template),
                io.Autogrow.Input("ints", template=int_template),
            ],
            outputs=[
                io.Image.Output(display_name="IMAGE0"),
                io.Image.Output(display_name="IMAGE1"),
                io.Image.Output(display_name="IMAGE2"),
                io.Image.Output(display_name="IMAGE3"),
            ],
        )

    @classmethod
    def execute(
        cls,
        fragment_shader: str,
        size_mode: SizeModeInput,
        images: io.Autogrow.Type,
        floats: io.Autogrow.Type = None,
        ints: io.Autogrow.Type = None,
        **kwargs,
    ) -> io.NodeOutput:
        image_list = [v for v in images.values() if v is not None]
        float_list = (
            [v if v is not None else 0.0 for v in floats.values()] if floats else []
        )
        int_list = [v if v is not None else 0 for v in ints.values()] if ints else []

        if not image_list:
            raise ValueError("At least one input image is required")

        # Determine output dimensions
        if size_mode["size_mode"] == "custom":
            out_width = size_mode["width"]
            out_height = size_mode["height"]
        else:
            out_height, out_width = image_list[0].shape[1:3]

        batch_size = image_list[0].shape[0]

        # Prepare batches
        image_batches = []
        for batch_idx in range(batch_size):
            batch_images = [img_tensor[batch_idx].cpu().numpy().astype(np.float32) for img_tensor in image_list]
            image_batches.append(batch_images)

        all_batch_outputs = _render_shader_batch(
            fragment_shader,
            out_width,
            out_height,
            image_batches,
            float_list,
            int_list,
        )

        # Collect outputs into tensors
        all_outputs = [[] for _ in range(MAX_OUTPUTS)]
        for batch_outputs in all_batch_outputs:
            for i, out_img in enumerate(batch_outputs):
                all_outputs[i].append(torch.from_numpy(out_img))

        output_tensors = [torch.stack(all_outputs[i], dim=0) for i in range(MAX_OUTPUTS)]
        return io.NodeOutput(
            *output_tensors,
            ui=cls._build_ui_output(image_list, output_tensors[0]),
        )

    @classmethod
    def _build_ui_output(
        cls, image_list: list[torch.Tensor], output_batch: torch.Tensor
    ) -> dict[str, list]:
        """Build UI output with input and output images for client-side shader execution."""
        combined_inputs = torch.cat(image_list, dim=0)
        input_images_ui = ui.ImageSaveHelper.save_images(
            combined_inputs,
            filename_prefix="GLSLShader_input",
            folder_type=io.FolderType.temp,
            cls=None,
            compress_level=1,
        )

        output_images_ui = ui.ImageSaveHelper.save_images(
            output_batch,
            filename_prefix="GLSLShader_output",
            folder_type=io.FolderType.temp,
            cls=None,
            compress_level=1,
        )

        return {"input_images": input_images_ui, "images": output_images_ui}


class GLSLExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [GLSLShader]


async def comfy_entrypoint() -> GLSLExtension:
    return GLSLExtension()

@@ -13,7 +13,6 @@ class QuadrupleCLIPLoader(io.ComfyNode):
            node_id="QuadrupleCLIPLoader",
            category="advanced/loaders",
            description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
            short_description=None,
            inputs=[
                io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
@@ -41,8 +40,6 @@ class CLIPTextEncodeHiDream(io.ComfyNode):
            node_id="CLIPTextEncodeHiDream",
            search_aliases=["hidream prompt"],
            category="advanced/conditioning",
            description="Encodes separate CLIP-L, CLIP-G, T5-XXL, and Llama text prompts into HiDream conditioning.",
            short_description="Encodes multi-encoder text prompts for HiDream.",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),

@@ -38,8 +38,6 @@ class PairConditioningSetProperties:
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a positive/negative conditioning pair."
    SHORT_DESCRIPTION = "Set properties on a positive/negative conditioning pair."
    FUNCTION = "set_properties"

    def set_properties(self, positive_NEW, negative_NEW,
@@ -75,8 +73,6 @@ class PairConditioningSetPropertiesAndCombine:
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    DESCRIPTION = "Set properties on new conditioning pair and combine with existing positive/negative conditioning."
    SHORT_DESCRIPTION = "Set properties on new cond pair, combine with existing."
    FUNCTION = "set_properties"

    def set_properties(self, positive, negative, positive_NEW, negative_NEW,
@@ -108,8 +104,6 @@ class ConditioningSetProperties:
    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    DESCRIPTION = "Set properties like strength, mask, hooks, and timesteps on a single conditioning input."
    SHORT_DESCRIPTION = "Set properties on a single conditioning input."
    FUNCTION = "set_properties"

    def set_properties(self, cond_NEW,
@@ -142,8 +136,6 @@ class ConditioningSetPropertiesAndCombine:
    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    DESCRIPTION = "Set properties on new conditioning and combine it with an existing conditioning input."
    SHORT_DESCRIPTION = "Set properties on new conditioning, combine with existing."
    FUNCTION = "set_properties"

    def set_properties(self, cond, cond_NEW,
@@ -172,8 +164,6 @@ class PairConditioningCombine:
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    DESCRIPTION = "Combine two positive/negative conditioning pairs into a single pair."
    SHORT_DESCRIPTION = None
    FUNCTION = "combine"

    def combine(self, positive_A, negative_A, positive_B, negative_B):
@@ -201,8 +191,6 @@ class PairConditioningSetDefaultAndCombine:
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    DESCRIPTION = "Set default conditioning pair and combine it with existing positive/negative conditioning and optional hooks."
    SHORT_DESCRIPTION = "Set default cond pair and combine with existing."
    FUNCTION = "set_default_and_combine"

    def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
@@ -229,8 +217,6 @@ class ConditioningSetDefaultAndCombine:
    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    DESCRIPTION = "Set default conditioning and combine it with existing conditioning input and optional hooks."
    SHORT_DESCRIPTION = "Set default conditioning and combine with existing."
    FUNCTION = "set_default_and_combine"

    def set_default_and_combine(self, cond, cond_DEFAULT,
@@ -258,8 +244,6 @@ class SetClipHooks:
    EXPERIMENTAL = True
    RETURN_TYPES = ("CLIP",)
    CATEGORY = "advanced/hooks/clip"
    DESCRIPTION = "Apply hooks to a CLIP model, optionally propagating them to conditioning outputs and enabling CLIP scheduling."
    SHORT_DESCRIPTION = "Apply hooks to a CLIP model with scheduling options."
    FUNCTION = "apply_hooks"

    def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: comfy.hooks.HookGroup=None):
@@ -291,8 +275,6 @@ class ConditioningTimestepsRange:
    RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
    RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
    CATEGORY = "advanced/hooks"
    DESCRIPTION = "Define a timestep percentage range and output the range plus its complement before and after segments."
    SHORT_DESCRIPTION = "Define a timestep range with before/after complements."
    FUNCTION = "create_range"

    def create_range(self, start_percent: float, end_percent: float):
@@ -326,8 +308,6 @@ class CreateHookLora:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    DESCRIPTION = "Create a LoRA hook with separate model and CLIP strength that can be scheduled on conditioning."
    SHORT_DESCRIPTION = "Create a LoRA hook with model and CLIP strength."
    FUNCTION = "create_hook"

    def create_hook(self, lora_name: str, strength_model: float, strength_clip: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -373,8 +353,6 @@ class CreateHookLoraModelOnly(CreateHookLora):
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    DESCRIPTION = "Create a LoRA hook that only affects the model (not CLIP) for scheduling on conditioning."
    SHORT_DESCRIPTION = "Create a model-only LoRA hook."
    FUNCTION = "create_hook_model_only"

    def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: comfy.hooks.HookGroup=None):
@@ -405,8 +383,6 @@ class CreateHookModelAsLora:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    DESCRIPTION = "Create a hook from a full checkpoint treated as a LoRA, with separate model and CLIP strength controls."
    SHORT_DESCRIPTION = "Create a hook from a checkpoint treated as LoRA."
    FUNCTION = "create_hook"

    def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
@@ -455,8 +431,6 @@ class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    DESCRIPTION = "Create a model-only hook from a full checkpoint treated as a LoRA, without affecting CLIP."
    SHORT_DESCRIPTION = "Create a model-only hook from a checkpoint as LoRA."
    FUNCTION = "create_hook_model_only"

    def create_hook_model_only(self, ckpt_name: str, strength_model: float,
@@ -486,8 +460,6 @@ class SetHookKeyframes:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/scheduling"
    DESCRIPTION = "Assign keyframe schedules to hooks for controlling their strength over time during sampling."
    SHORT_DESCRIPTION = "Assign keyframe schedules to hooks over time."
    FUNCTION = "set_hook_keyframes"

    def set_hook_keyframes(self, hooks: comfy.hooks.HookGroup, hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -516,8 +488,6 @@ class CreateHookKeyframe:
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    DESCRIPTION = "Create a single hook keyframe with a strength multiplier at a specific timestep percentage."
    SHORT_DESCRIPTION = "Create a hook keyframe at a specific timestep."
    FUNCTION = "create_hook_keyframe"

    def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: comfy.hooks.HookKeyframeGroup=None):
@@ -553,8 +523,6 @@ class CreateHookKeyframesInterpolated:
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    DESCRIPTION = "Generate multiple interpolated hook keyframes between start and end strength values over a timestep range."
    SHORT_DESCRIPTION = "Generate interpolated hook keyframes over a timestep range."
    FUNCTION = "create_hook_keyframes"

    def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
@@ -600,8 +568,6 @@ class CreateHookKeyframesFromFloats:
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    DESCRIPTION = "Create hook keyframes from a list of float values distributed evenly across a timestep percentage range."
    SHORT_DESCRIPTION = "Create hook keyframes from a list of float values."
    FUNCTION = "create_hook_keyframes"

    def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
@@ -673,8 +639,6 @@ class CombineHooks:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    DESCRIPTION = "Combine two hook groups into one."
    SHORT_DESCRIPTION = None
    FUNCTION = "combine_hooks"

    def combine_hooks(self,
@@ -702,8 +666,6 @@ class CombineHooksFour:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    DESCRIPTION = "Combine up to four hook groups into one."
    SHORT_DESCRIPTION = None
    FUNCTION = "combine_hooks"

    def combine_hooks(self,
@@ -737,8 +699,6 @@ class CombineHooksEight:
    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    DESCRIPTION = "Combine up to eight hook groups into one."
    SHORT_DESCRIPTION = None
    FUNCTION = "combine_hooks"

    def combine_hooks(self,

@@ -15,8 +15,6 @@ class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
        return io.Schema(
            node_id="CLIPTextEncodeHunyuanDiT",
            category="advanced/conditioning",
            description="Encodes text using both BERT and mT5-XL tokenizers for Hunyuan DiT conditioning.",
            short_description="Dual-tokenizer text encoding for Hunyuan DiT.",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("bert", multiline=True, dynamic_prompts=True),
@@ -44,8 +42,6 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
            node_id="EmptyHunyuanLatentVideo",
            display_name="Empty HunyuanVideo 1.0 Latent",
            category="latent/video",
            description="Creates an empty latent tensor sized for HunyuanVideo 1.0 video generation.",
            short_description="Empty latent for HunyuanVideo 1.0 generation.",
            inputs=[
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
@@ -71,8 +67,6 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo):
        schema = super().define_schema()
        schema.node_id = "EmptyHunyuanVideo15Latent"
        schema.display_name = "Empty HunyuanVideo 1.5 Latent"
        schema.description = "Creates an empty latent tensor sized for HunyuanVideo 1.5 video generation with 16x spatial downscale."
        schema.short_description = "Empty latent for HunyuanVideo 1.5 generation."
        return schema

    @classmethod
@@ -88,8 +82,6 @@ class HunyuanVideo15ImageToVideo(io.ComfyNode):
        return io.Schema(
            node_id="HunyuanVideo15ImageToVideo",
            category="conditioning/video_models",
            description="Prepares conditioning and latent for HunyuanVideo 1.5 image-to-video generation with start image and CLIP vision support.",
            short_description="HunyuanVideo 1.5 image-to-video conditioning setup.",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
@@ -139,9 +131,6 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
    def define_schema(cls):
        return io.Schema(
            node_id="HunyuanVideo15SuperResolution",
            category="conditioning/video_models",
            description="Sets up conditioning for HunyuanVideo 1.5 super-resolution upscaling of a latent with noise augmentation and optional image guidance.",
            short_description="HunyuanVideo 1.5 super-resolution latent conditioning.",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
@@ -188,8 +177,6 @@ class LatentUpscaleModelLoader(io.ComfyNode):
            node_id="LatentUpscaleModelLoader",
            display_name="Load Latent Upscale Model",
            category="loaders",
            description="Loads a latent upscale model from disk, supporting HunyuanVideo 720p, 1080p, and other latent upsampler architectures.",
            short_description="Load a latent upscale model from file.",
            inputs=[
                io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")),
            ],
@@ -239,8 +226,6 @@ class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode):
            node_id="HunyuanVideo15LatentUpscaleWithModel",
            display_name="Hunyuan Video 15 Latent Upscale With Model",
            category="latent",
            description="Upscales a video latent to a target resolution using a loaded latent upscale model and configurable upscale method.",
            short_description="Upscale video latent using a latent upscale model.",
            inputs=[
                io.LatentUpscaleModel.Input("model"),
                io.Latent.Input("samples"),
@@ -290,8 +275,6 @@ class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
        return io.Schema(
            node_id="TextEncodeHunyuanVideo_ImageToVideo",
            category="advanced/conditioning",
            description="Encodes text with CLIP vision image embeddings for HunyuanVideo image-to-video conditioning using an interleaved template.",
            short_description="Text and image encoding for HunyuanVideo image-to-video.",
            inputs=[
                io.Clip.Input("clip"),
                io.ClipVisionOutput.Input("clip_vision_output"),
@@ -323,8 +306,6 @@ class HunyuanImageToVideo(io.ComfyNode):
        return io.Schema(
            node_id="HunyuanImageToVideo",
            category="conditioning/video_models",
            description="Prepares conditioning and latent for Hunyuan image-to-video generation with selectable guidance type.",
            short_description="Hunyuan image-to-video conditioning with guidance options.",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Vae.Input("vae"),
@@ -376,8 +357,6 @@ class EmptyHunyuanImageLatent(io.ComfyNode):
        return io.Schema(
            node_id="EmptyHunyuanImageLatent",
            category="latent",
            description="Creates an empty latent tensor sized for Hunyuan image generation.",
            short_description="Empty latent for Hunyuan image generation.",
            inputs=[
                io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
                io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32),
@@ -401,9 +380,6 @@ class HunyuanRefinerLatent(io.ComfyNode):
    def define_schema(cls):
        return io.Schema(
            node_id="HunyuanRefinerLatent",
            category="conditioning/video_models",
            description="Prepares conditioning for a Hunyuan refiner pass by concatenating the input latent with noise augmentation settings.",
            short_description="Hunyuan refiner conditioning with noise augmentation.",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),

Some files were not shown because too many files have changed in this diff.