Mirror of https://github.com/comfyanonymous/ComfyUI.git

Compare commits: feat/api-n...range-type (1 commit, 8822627a60)
@@ -9,6 +9,7 @@ from comfy_api.latest._input import (
    CurveInput,
    MonotoneCubicCurve,
    LinearCurve,
    RangeInput,
)

__all__ = [
@@ -21,4 +22,5 @@ __all__ = [
    "CurveInput",
    "MonotoneCubicCurve",
    "LinearCurve",
    "RangeInput",
]
@@ -1,5 +1,6 @@
from .basic_types import ImageInput, AudioInput, MaskInput, LatentInput
from .curve_types import CurvePoint, CurveInput, MonotoneCubicCurve, LinearCurve
from .range_types import RangeInput
from .video_types import VideoInput

__all__ = [

@@ -12,4 +13,5 @@ __all__ = [
    "CurveInput",
    "MonotoneCubicCurve",
    "LinearCurve",
    "RangeInput",
]
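With both re-export modules updated, the new type is importable at package level. A quick check (not part of the diff):

```python
# Not part of the diff: just confirms the re-export chain added above.
from comfy_api.latest._input import RangeInput
```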
comfy_api/latest/_input/range_types.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from __future__ import annotations

import logging
import math
import numpy as np

logger = logging.getLogger(__name__)


class RangeInput:
    """Represents a levels/range adjustment: input range [min, max] with
    optional midpoint (gamma control).

    Generates a 1D LUT identical to GIMP's levels mapping:
    1. Normalize input to [0, 1] using [min, max]
    2. Apply gamma correction: pow(value, 1/gamma)
    3. Clamp to [0, 1]

    The midpoint field is a position in [0, 1] representing where the
    midtone falls within [min, max]. It maps to gamma via:
        gamma = -log2(midpoint)
    So midpoint=0.5 → gamma=1.0 (linear).
    """

    def __init__(self, min_val: float, max_val: float, midpoint: float | None = None):
        self.min_val = min_val
        self.max_val = max_val
        self.midpoint = midpoint

    @staticmethod
    def from_raw(data) -> RangeInput:
        if isinstance(data, RangeInput):
            return data
        if isinstance(data, dict):
            return RangeInput(
                min_val=float(data.get("min", 0.0)),
                max_val=float(data.get("max", 1.0)),
                midpoint=float(data["midpoint"]) if data.get("midpoint") is not None else None,
            )
        raise TypeError(f"Cannot convert {type(data)} to RangeInput")

    def to_lut(self, size: int = 256) -> np.ndarray:
        """Generate a float64 lookup table mapping [0, 1] input through this
        levels adjustment.

        The LUT maps normalized input values (0..1) to output values (0..1),
        matching the GIMP levels formula.
        """
        xs = np.linspace(0.0, 1.0, size, dtype=np.float64)

        in_range = self.max_val - self.min_val
        if abs(in_range) < 1e-10:
            return np.where(xs >= self.min_val, 1.0, 0.0).astype(np.float64)

        # Normalize: map [min, max] → [0, 1]
        result = (xs - self.min_val) / in_range
        result = np.clip(result, 0.0, 1.0)

        # Gamma correction from midpoint
        if self.midpoint is not None and self.midpoint > 0 and self.midpoint != 0.5:
            gamma = max(-math.log2(self.midpoint), 0.001)
            inv_gamma = 1.0 / gamma
            mask = result > 0
            result[mask] = np.power(result[mask], inv_gamma)

        return result

    def __repr__(self) -> str:
        mid = f", midpoint={self.midpoint}" if self.midpoint is not None else ""
        return f"RangeInput(min={self.min_val}, max={self.max_val}{mid})"
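As a quick orientation to the new type, here is a minimal usage sketch (not part of the diff; applying the LUT with `np.interp` is only one hypothetical way to consume it downstream):

```python
import numpy as np
from comfy_api.latest._input import RangeInput

# Build the adjustment from widget-style data. midpoint=0.25 maps to
# gamma = -log2(0.25) = 2.0, i.e. a brightening curve.
rng = RangeInput.from_raw({"min": 0.1, "max": 0.9, "midpoint": 0.25})
lut = rng.to_lut(size=256)  # float64 array of 256 values in [0, 1]

# Apply the LUT to a normalized image by interpolating into it.
image = np.random.rand(64, 64)
adjusted = np.interp(image, np.linspace(0.0, 1.0, lut.size), lut)
```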
@@ -1266,6 +1266,43 @@ class Histogram(ComfyTypeIO):
    Type = list[int]


@comfytype(io_type="RANGE")
class Range(ComfyTypeIO):
    from comfy_api.input import RangeInput
    if TYPE_CHECKING:
        Type = RangeInput

    class Input(WidgetInput):
        def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None,
                     socketless: bool=True, default: dict=None,
                     display: str=None,
                     gradient_stops: list=None,
                     show_midpoint: bool=None,
                     midpoint_scale: str=None,
                     value_min: float=None,
                     value_max: float=None,
                     advanced: bool=None):
            super().__init__(id, display_name, optional, tooltip, None, default, socketless, None, None, None, None, advanced)
            if default is None:
                self.default = {"min": 0.0, "max": 1.0}
            self.display = display
            self.gradient_stops = gradient_stops
            self.show_midpoint = show_midpoint
            self.midpoint_scale = midpoint_scale
            self.value_min = value_min
            self.value_max = value_max

        def as_dict(self):
            return super().as_dict() | prune_dict({
                "display": self.display,
                "gradient_stops": self.gradient_stops,
                "show_midpoint": self.show_midpoint,
                "midpoint_scale": self.midpoint_scale,
                "value_min": self.value_min,
                "value_max": self.value_max,
            })


DYNAMIC_INPUT_LOOKUP: dict[str, Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]] = {}
def register_dynamic_input_func(io_type: str, func: Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]):
    DYNAMIC_INPUT_LOOKUP[io_type] = func

@@ -2276,5 +2313,6 @@ __all__ = [
    "BoundingBox",
    "Curve",
    "Histogram",
    "Range",
    "NodeReplace",
]
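To see how the widget metadata comes together, here is a hypothetical declaration of a Range input; the identifier, labels, and values below are made up, and only the parameter names are taken from the diff:

```python
# Hypothetical usage of the new widget type; all concrete values are examples.
levels_in = IO.Range.Input(
    "levels",
    display_name="Levels",
    default={"min": 0.0, "max": 1.0, "midpoint": 0.5},
    show_midpoint=True,
    value_min=0.0,
    value_max=1.0,
    tooltip="Black/white input points with an optional midtone control",
)

# as_dict() merges the base widget fields with the Range-specific ones,
# dropping unset (None) values via prune_dict.
widget_spec = levels_in.as_dict()
```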
@@ -24,9 +24,8 @@ from comfy_api_nodes.util import (
AVERAGE_DURATION_VIDEO_GEN = 32
MODELS_MAP = {
    "veo-2.0-generate-001": "veo-2.0-generate-001",
    "veo-3.1-generate": "veo-3.1-generate-001",
    "veo-3.1-fast-generate": "veo-3.1-fast-generate-001",
    "veo-3.1-lite": "veo-3.1-lite-generate-001",
    "veo-3.1-generate": "veo-3.1-generate-preview",
    "veo-3.1-fast-generate": "veo-3.1-fast-generate-preview",
    "veo-3.0-generate-001": "veo-3.0-generate-001",
    "veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001",
}
@@ -248,8 +247,17 @@ class VeoVideoGenerationNode(IO.ComfyNode):
        raise Exception("Video generation completed but no video was returned")


class Veo3VideoGenerationNode(IO.ComfyNode):
    """Generates videos from text prompts using Google's Veo 3 API."""
class Veo3VideoGenerationNode(VeoVideoGenerationNode):
    """
    Generates videos from text prompts using Google's Veo 3 API.

    Supported models:
    - veo-3.0-generate-001
    - veo-3.0-fast-generate-001

    This node extends the base Veo node with Veo 3 specific features including
    audio generation and fixed 8-second duration.
    """

    @classmethod
    def define_schema(cls):
@@ -271,13 +279,6 @@ class Veo3VideoGenerationNode(IO.ComfyNode):
                    default="16:9",
                    tooltip="Aspect ratio of the output video",
                ),
                IO.Combo.Input(
                    "resolution",
                    options=["720p", "1080p", "4k"],
                    default="720p",
                    tooltip="Output video resolution. 4K is not available for veo-3.1-lite and veo-3.0 models.",
                    optional=True,
                ),
                IO.String.Input(
                    "negative_prompt",
                    multiline=True,
@@ -288,11 +289,11 @@ class Veo3VideoGenerationNode(IO.ComfyNode):
                IO.Int.Input(
                    "duration_seconds",
                    default=8,
                    min=4,
                    min=8,
                    max=8,
                    step=2,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    tooltip="Duration of the output video in seconds",
                    tooltip="Duration of the output video in seconds (Veo 3 only supports 8 seconds)",
                    optional=True,
                ),
                IO.Boolean.Input(
@@ -331,10 +332,10 @@ class Veo3VideoGenerationNode(IO.ComfyNode):
                    options=[
                        "veo-3.1-generate",
                        "veo-3.1-fast-generate",
                        "veo-3.1-lite",
                        "veo-3.0-generate-001",
                        "veo-3.0-fast-generate-001",
                    ],
                    default="veo-3.0-generate-001",
                    tooltip="Veo 3 model to use for video generation",
                    optional=True,
                ),
@@ -355,111 +356,21 @@ class Veo3VideoGenerationNode(IO.ComfyNode):
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "resolution", "duration_seconds"]),
                depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio"]),
                expr="""
                (
                    $m := widgets.model;
                    $r := widgets.resolution;
                    $a := widgets.generate_audio;
                    $seconds := widgets.duration_seconds;
                    $pps :=
                        $contains($m, "lite")
                            ? ($r = "1080p" ? ($a ? 0.08 : 0.05) : ($a ? 0.05 : 0.03))
                        : $contains($m, "3.1-fast")
                            ? ($r = "4k" ? ($a ? 0.30 : 0.25) : $r = "1080p" ? ($a ? 0.12 : 0.10) : ($a ? 0.10 : 0.08))
                        : $contains($m, "3.1-generate")
                            ? ($r = "4k" ? ($a ? 0.60 : 0.40) : ($a ? 0.40 : 0.20))
                        : $contains($m, "3.0-fast")
                            ? ($a ? 0.15 : 0.10)
                        : ($a ? 0.40 : 0.20);
                    {"type":"usd","usd": $pps * $seconds}
                    ($contains($m,"veo-3.0-fast-generate-001") or $contains($m,"veo-3.1-fast-generate"))
                        ? {"type":"usd","usd": ($a ? 1.2 : 0.8)}
                    : ($contains($m,"veo-3.0-generate-001") or $contains($m,"veo-3.1-generate"))
                        ? {"type":"usd","usd": ($a ? 3.2 : 1.6)}
                    : {"type":"range_usd","min_usd":0.8,"max_usd":3.2}
                )
                """,
            ),
        )
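For orientation, the per-second pricing variant visible in the hunk above works out as follows. This is a rough Python restatement of the JSONata expression (illustrative only; the real badge is computed client-side from the expr string, and the example inputs are made up):

```python
# Rough Python restatement of the per-second pricing expression above.
def estimate_usd(model: str, resolution: str, audio: bool, seconds: int) -> float:
    if "lite" in model:
        pps = (0.08 if audio else 0.05) if resolution == "1080p" else (0.05 if audio else 0.03)
    elif "3.1-fast" in model:
        if resolution == "4k":
            pps = 0.30 if audio else 0.25
        elif resolution == "1080p":
            pps = 0.12 if audio else 0.10
        else:
            pps = 0.10 if audio else 0.08
    elif "3.1-generate" in model:
        pps = (0.60 if audio else 0.40) if resolution == "4k" else (0.40 if audio else 0.20)
    elif "3.0-fast" in model:
        pps = 0.15 if audio else 0.10
    else:
        pps = 0.40 if audio else 0.20
    return pps * seconds

# e.g. veo-3.1-generate, 4k, with audio, 8 s -> 0.60 * 8 = 4.80 USD
print(estimate_usd("veo-3.1-generate", "4k", True, 8))
```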
    @classmethod
    async def execute(
        cls,
        prompt,
        aspect_ratio="16:9",
        resolution="720p",
        negative_prompt="",
        duration_seconds=8,
        enhance_prompt=True,
        person_generation="ALLOW",
        seed=0,
        image=None,
        model="veo-3.0-generate-001",
        generate_audio=False,
    ):
        if "lite" in model and resolution == "4k":
            raise Exception("4K resolution is not supported by the veo-3.1-lite model.")

        model = MODELS_MAP[model]

        instances = [{"prompt": prompt}]
        if image is not None:
            image_base64 = tensor_to_base64_string(image)
            if image_base64:
                instances[0]["image"] = {"bytesBase64Encoded": image_base64, "mimeType": "image/png"}

        parameters = {
            "aspectRatio": aspect_ratio,
            "personGeneration": person_generation,
            "durationSeconds": duration_seconds,
            "enhancePrompt": True,
            "generateAudio": generate_audio,
        }
        if negative_prompt:
            parameters["negativePrompt"] = negative_prompt
        if seed > 0:
            parameters["seed"] = seed
        if "veo-3.1" in model:
            parameters["resolution"] = resolution

        initial_response = await sync_op(
            cls,
            ApiEndpoint(path=f"/proxy/veo/{model}/generate", method="POST"),
            response_model=VeoGenVidResponse,
            data=VeoGenVidRequest(
                instances=instances,
                parameters=parameters,
            ),
        )

        poll_response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/veo/{model}/poll", method="POST"),
            response_model=VeoGenVidPollResponse,
            status_extractor=lambda r: "completed" if r.done else "pending",
            data=VeoGenVidPollRequest(operationName=initial_response.name),
            poll_interval=5.0,
            estimated_duration=AVERAGE_DURATION_VIDEO_GEN,
        )

        if poll_response.error:
            raise Exception(f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})")

        response = poll_response.response
        filtered_count = response.raiMediaFilteredCount
        if filtered_count:
            reasons = response.raiMediaFilteredReasons or []
            reason_part = f": {reasons[0]}" if reasons else ""
            raise Exception(
                f"Content blocked by Google's Responsible AI filters{reason_part} "
                f"({filtered_count} video{'s' if filtered_count != 1 else ''} filtered)."
            )

        if response.videos:
            video = response.videos[0]
            if video.bytesBase64Encoded:
                return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
            if video.gcsUri:
                return IO.NodeOutput(await download_url_to_video_output(video.gcsUri))
            raise Exception("Video returned but no data or URL was provided")
        raise Exception("Video generation completed but no video was returned")


class Veo3FirstLastFrameNode(IO.ComfyNode):
@@ -483,7 +394,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
                    default="",
                    tooltip="Negative text prompt to guide what to avoid in the video",
                ),
                IO.Combo.Input("resolution", options=["720p", "1080p", "4k"]),
                IO.Combo.Input("resolution", options=["720p", "1080p"]),
                IO.Combo.Input(
                    "aspect_ratio",
                    options=["16:9", "9:16"],
@@ -513,7 +424,8 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
                IO.Image.Input("last_frame", tooltip="End frame"),
                IO.Combo.Input(
                    "model",
                    options=["veo-3.1-generate", "veo-3.1-fast-generate", "veo-3.1-lite"],
                    options=["veo-3.1-generate", "veo-3.1-fast-generate"],
                    default="veo-3.1-fast-generate",
                ),
                IO.Boolean.Input(
                    "generate_audio",
@@ -531,20 +443,26 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "duration", "resolution"]),
                depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "duration"]),
                expr="""
                (
                    $prices := {
                        "veo-3.1-fast-generate": { "audio": 0.15, "no_audio": 0.10 },
                        "veo-3.1-generate": { "audio": 0.40, "no_audio": 0.20 }
                    };
                    $m := widgets.model;
                    $r := widgets.resolution;
                    $ga := widgets.generate_audio;
                    $ga := (widgets.generate_audio = "true");
                    $seconds := widgets.duration;
                    $pps :=
                        $contains($m, "lite")
                            ? ($r = "1080p" ? ($ga ? 0.08 : 0.05) : ($ga ? 0.05 : 0.03))
                        : $contains($m, "fast")
                            ? ($r = "4k" ? ($ga ? 0.30 : 0.25) : $r = "1080p" ? ($ga ? 0.12 : 0.10) : ($ga ? 0.10 : 0.08))
                        : ($r = "4k" ? ($ga ? 0.60 : 0.40) : ($ga ? 0.40 : 0.20));
                    {"type":"usd","usd": $pps * $seconds}
                    $modelKey :=
                        $contains($m, "veo-3.1-fast-generate") ? "veo-3.1-fast-generate" :
                        $contains($m, "veo-3.1-generate") ? "veo-3.1-generate" :
                        "";
                    $audioKey := $ga ? "audio" : "no_audio";
                    $modelPrices := $lookup($prices, $modelKey);
                    $pps := $lookup($modelPrices, $audioKey);
                    ($pps != null)
                        ? {"type":"usd","usd": $pps * $seconds}
                        : {"type":"range_usd","min_usd": 0.4, "max_usd": 3.2}
                )
                """,
            ),
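The lookup-table variant in this hunk resolves a price table instead of chaining ternaries. A rough Python equivalent (illustrative only; inputs and return shapes are made up for the example):

```python
# Rough Python restatement of the lookup-table pricing above (illustrative only).
PRICES = {
    "veo-3.1-fast-generate": {"audio": 0.15, "no_audio": 0.10},
    "veo-3.1-generate": {"audio": 0.40, "no_audio": 0.20},
}

def estimate_usd(model: str, generate_audio: bool, seconds: int):
    # Check the more specific "fast" key first, mirroring the expression's order.
    if "veo-3.1-fast-generate" in model:
        key = "veo-3.1-fast-generate"
    elif "veo-3.1-generate" in model:
        key = "veo-3.1-generate"
    else:
        key = ""
    per_second = PRICES.get(key, {}).get("audio" if generate_audio else "no_audio")
    if per_second is None:
        return ("range_usd", 0.4, 3.2)  # fall back to a price range
    return ("usd", per_second * seconds)

# e.g. veo-3.1-generate with audio for 8 s -> 0.40 * 8 = 3.20 USD
print(estimate_usd("veo-3.1-generate", True, 8))
```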
@@ -564,9 +482,6 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
        model: str,
        generate_audio: bool,
    ):
        if "lite" in model and resolution == "4k":
            raise Exception("4K resolution is not supported by the veo-3.1-lite model.")

        model = MODELS_MAP[model]
        initial_response = await sync_op(
            cls,