Compare commits


7 Commits

Author SHA1 Message Date
Comfy Org PR Bot
6c14f129af Bump comfyui-frontend-package to 1.39.14 (#12494)
* Bump comfyui-frontend-package to 1.39.13

* Update requirements.txt

---------

Co-authored-by: Christian Byrne <cbyrne@comfy.org>
2026-02-17 13:41:34 -08:00
rattus
58dcc97dcf ops: limit return of requants (#12506)
This check was far too broad and the dtype is not a reliable indicator
of wanting the requant (as QT returns the compute dtype as the dtype).
So explicitly plumb whether fp8mm wants the requant or not.
2026-02-17 15:32:27 -05:00
comfyanonymous
19236edfa4 ComfyUI v0.14.1 2026-02-17 13:28:06 -05:00
ComfyUI Wiki
73c3f86973 chore: update workflow templates to v0.8.43 (#12507) 2026-02-17 13:25:55 -05:00
Alexander Piskun
262abf437b feat(api-nodes): add Recraft V4 nodes (#12502) 2026-02-17 13:25:44 -05:00
Alexander Piskun
5284e6bf69 feat(api-nodes): add "viduq3-turbo" model and Vidu3StartEnd node; fix the price badges (#12482) 2026-02-17 10:07:14 -08:00
chaObserv
44f8598521 Fix anima LLM adapter forward when manual cast (#12504) 2026-02-17 07:56:44 -08:00
8 changed files with 528 additions and 42 deletions

View File

@@ -179,8 +179,8 @@ class LLMAdapter(nn.Module):
if source_attention_mask.ndim == 2:
source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1)
x = self.in_proj(self.embed(target_input_ids))
context = source_hidden_states
x = self.in_proj(self.embed(target_input_ids, out_dtype=context.dtype))
position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0)
position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0)
position_embeddings = self.rotary_emb(x, position_ids)
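
The fix produces the embedded target tokens directly in the dtype of source_hidden_states (the cross-attention context), so in_proj no longer sees a dtype mismatch when the weights are manually cast. The out_dtype argument belongs to ComfyUI's cast-aware embedding op; a rough plain-PyTorch illustration of the same idea, using an explicit .to() cast and stand-in modules rather than the adapter's real layers:

import torch
import torch.nn as nn

embed = nn.Embedding(1000, 64)                           # table kept in float32
in_proj = nn.Linear(64, 64, dtype=torch.bfloat16)        # projection manually cast
target_input_ids = torch.randint(0, 1000, (1, 8))
context = torch.randn(1, 12, 64, dtype=torch.bfloat16)   # stands in for source_hidden_states

x = embed(target_input_ids).to(context.dtype)   # cast before projecting, as out_dtype does above
x = in_proj(x)                                  # dtypes now agree under manual cast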

View File

@@ -79,7 +79,7 @@ def cast_to_input(weight, input, non_blocking=False, copy=True):
return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy)
def cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compute_dtype):
def cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compute_dtype, want_requant):
offload_stream = None
xfer_dest = None
@@ -170,10 +170,10 @@ def cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compu
#FIXME: this is not accurate, we need to be sensitive to the compute dtype
x = lowvram_fn(x)
if (isinstance(orig, QuantizedTensor) and
(orig.dtype == dtype and len(fns) == 0 or update_weight)):
(want_requant and len(fns) == 0 or update_weight)):
seed = comfy.utils.string_to_seed(s.seed_key)
y = QuantizedTensor.from_float(x, s.layout_type, scale="recalculate", stochastic_rounding=seed)
if orig.dtype == dtype and len(fns) == 0:
if want_requant and len(fns) == 0:
#The layer actually wants our freshly saved QT
x = y
elif update_weight:
@@ -194,7 +194,7 @@ def cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compu
return weight, bias, (offload_stream, device if signature is not None else None, None)
def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, offloadable=False, compute_dtype=None):
def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, offloadable=False, compute_dtype=None, want_requant=False):
# NOTE: offloadable=False is a legacy and if you are a custom node author reading this please pass
# offloadable=True and call uncast_bias_weight() after your last usage of the weight/bias. This
# will add async-offload support to your cast and improve performance.
@@ -212,7 +212,7 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of
non_blocking = comfy.model_management.device_supports_non_blocking(device)
if hasattr(s, "_v"):
return cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compute_dtype)
return cast_bias_weight_with_vbar(s, dtype, device, bias_dtype, non_blocking, compute_dtype, want_requant)
if offloadable and (device != s.weight.device or
(s.bias is not None and device != s.bias.device)):
@@ -850,8 +850,8 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
def _forward(self, input, weight, bias):
return torch.nn.functional.linear(input, weight, bias)
def forward_comfy_cast_weights(self, input, compute_dtype=None):
weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True, compute_dtype=compute_dtype)
def forward_comfy_cast_weights(self, input, compute_dtype=None, want_requant=False):
weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True, compute_dtype=compute_dtype, want_requant=want_requant)
x = self._forward(input, weight, bias)
uncast_bias_weight(self, weight, bias, offload_stream)
return x
@@ -881,8 +881,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
scale = comfy.model_management.cast_to_device(scale, input.device, None)
input = QuantizedTensor.from_float(input_reshaped, self.layout_type, scale=scale)
output = self.forward_comfy_cast_weights(input, compute_dtype)
output = self.forward_comfy_cast_weights(input, compute_dtype, want_requant=isinstance(input, QuantizedTensor))
# Reshape output back to 3D if input was 3D
if reshaped_3d:
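
As the commit message for 58dcc97dcf explains, the requant decision was previously inferred from orig.dtype == dtype, which is unreliable because a QuantizedTensor reports the compute dtype. The diff instead threads an explicit want_requant flag from the quantized linear's forward path down through cast_bias_weight into cast_bias_weight_with_vbar. A stripped-down sketch of that plumbing pattern, with stand-in bodies rather than the real casting and offload logic:

def cast_bias_weight_with_vbar(layer, want_requant):
    # real code: cast/offload the weight, then requantize only when explicitly asked
    if want_requant:
        return "freshly requantized weight"   # QuantizedTensor.from_float(...)
    return "plain cast weight"

def cast_bias_weight(layer, want_requant=False):
    # the flag is passed through unchanged instead of being re-derived from dtypes
    return cast_bias_weight_with_vbar(layer, want_requant)

def forward_comfy_cast_weights(layer, input_is_quantized):
    # fp8 matmul path: only request a requant when the input is itself a QuantizedTensor
    return cast_bias_weight(layer, want_requant=input_is_quantized)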

View File

@@ -198,11 +198,6 @@ dict_recraft_substyles_v3 = {
}
class RecraftModel(str, Enum):
recraftv3 = 'recraftv3'
recraftv2 = 'recraftv2'
class RecraftImageSize(str, Enum):
res_1024x1024 = '1024x1024'
res_1365x1024 = '1365x1024'
@@ -221,6 +216,41 @@ class RecraftImageSize(str, Enum):
res_1707x1024 = '1707x1024'
RECRAFT_V4_SIZES = [
"1024x1024",
"1536x768",
"768x1536",
"1280x832",
"832x1280",
"1216x896",
"896x1216",
"1152x896",
"896x1152",
"832x1344",
"1280x896",
"896x1280",
"1344x768",
"768x1344",
]
RECRAFT_V4_PRO_SIZES = [
"2048x2048",
"3072x1536",
"1536x3072",
"2560x1664",
"1664x2560",
"2432x1792",
"1792x2432",
"2304x1792",
"1792x2304",
"1664x2688",
"1434x1024",
"1024x1434",
"2560x1792",
"1792x2560",
]
class RecraftColorObject(BaseModel):
rgb: list[int] = Field(..., description='An array of 3 integer values in range of 0...255 defining RGB Color Model')
@@ -234,17 +264,16 @@ class RecraftControlsObject(BaseModel):
class RecraftImageGenerationRequest(BaseModel):
prompt: str = Field(..., description='The text prompt describing the image to generate')
size: RecraftImageSize | None = Field(None, description='The size of the generated image (e.g., "1024x1024")')
size: str | None = Field(None, description='The size of the generated image (e.g., "1024x1024")')
n: int = Field(..., description='The number of images to generate')
negative_prompt: str | None = Field(None, description='A text description of undesired elements on an image')
model: RecraftModel | None = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
model: str = Field(...)
style: str | None = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
substyle: str | None = Field(None, description='The substyle to apply to the generated image, depending on the style input')
controls: RecraftControlsObject | None = Field(None, description='A set of custom parameters to tweak generation process')
style_id: str | None = Field(None, description='Use a previously uploaded style as a reference; UUID')
strength: float | None = Field(None, description='Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity')
random_seed: int | None = Field(None, description="Seed for video generation")
# text_layout
class RecraftReturnedObject(BaseModel):
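
The request model now takes model and size as plain strings, since the V4 resolutions are not members of the RecraftImageSize enum; the allowed values live in RECRAFT_V4_SIZES and RECRAFT_V4_PRO_SIZES and are enforced by the Combo inputs of the new nodes. A standalone sketch of an equivalent explicit check (simplified pydantic model and truncated size lists, not the real RecraftImageGenerationRequest):

from pydantic import BaseModel, Field

V4_SIZES = ["1024x1024", "1536x768", "768x1536"]          # excerpt of RECRAFT_V4_SIZES
V4_PRO_SIZES = ["2048x2048", "3072x1536", "1536x3072"]    # excerpt of RECRAFT_V4_PRO_SIZES

class ImageGenerationRequest(BaseModel):
    prompt: str
    model: str = Field(...)
    size: str | None = None
    n: int = 1

def build_request(prompt: str, model: str, size: str, n: int = 1) -> ImageGenerationRequest:
    allowed = V4_PRO_SIZES if model == "recraftv4_pro" else V4_SIZES
    if size not in allowed:
        raise ValueError(f"{size!r} is not a valid size for {model}")
    return ImageGenerationRequest(prompt=prompt, model=model, size=size, n=n)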

View File

@@ -1,5 +1,4 @@
from io import BytesIO
from typing import Optional, Union
import aiohttp
import torch
@@ -9,6 +8,8 @@ from typing_extensions import override
from comfy.utils import ProgressBar
from comfy_api.latest import IO, ComfyExtension
from comfy_api_nodes.apis.recraft import (
RECRAFT_V4_PRO_SIZES,
RECRAFT_V4_SIZES,
RecraftColor,
RecraftColorChain,
RecraftControls,
@@ -18,7 +19,6 @@ from comfy_api_nodes.apis.recraft import (
RecraftImageGenerationResponse,
RecraftImageSize,
RecraftIO,
RecraftModel,
RecraftStyle,
RecraftStyleV3,
get_v3_substyles,
@@ -39,7 +39,7 @@ async def handle_recraft_file_request(
cls: type[IO.ComfyNode],
image: torch.Tensor,
path: str,
mask: Optional[torch.Tensor] = None,
mask: torch.Tensor | None = None,
total_pixels: int = 4096 * 4096,
timeout: int = 1024,
request=None,
@@ -73,11 +73,11 @@ async def handle_recraft_file_request(
def recraft_multipart_parser(
data,
parent_key=None,
formatter: Optional[type[callable]] = None,
converted_to_check: Optional[list[list]] = None,
formatter: type[callable] | None = None,
converted_to_check: list[list] | None = None,
is_list: bool = False,
return_mode: str = "formdata", # "dict" | "formdata"
) -> Union[dict, aiohttp.FormData]:
) -> dict | aiohttp.FormData:
"""
Formats data such that multipart/form-data will work with aiohttp library when both files and data are present.
@@ -309,7 +309,7 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
node_id="RecraftStyleV3InfiniteStyleLibrary",
display_name="Recraft Style - Infinite Style Library",
category="api node/image/Recraft",
description="Select style based on preexisting UUID from Recraft's Infinite Style Library.",
description="Choose style based on preexisting UUID from Recraft's Infinite Style Library.",
inputs=[
IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."),
],
@@ -485,7 +485,7 @@ class RecraftTextToImageNode(IO.ComfyNode):
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
size=size,
n=n,
style=recraft_style.style,
@@ -598,7 +598,7 @@ class RecraftImageToImageNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
strength=round(strength, 2),
style=recraft_style.style,
@@ -698,7 +698,7 @@ class RecraftImageInpaintingNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
style=recraft_style.style,
substyle=recraft_style.substyle,
@@ -810,7 +810,7 @@ class RecraftTextToVectorNode(IO.ComfyNode):
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
size=size,
n=n,
style=recraft_style.style,
@@ -933,7 +933,7 @@ class RecraftReplaceBackgroundNode(IO.ComfyNode):
request = RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt,
model=RecraftModel.recraftv3,
model="recraftv3",
n=n,
style=recraft_style.style,
substyle=recraft_style.substyle,
@@ -1078,6 +1078,252 @@ class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode):
)
class RecraftV4TextToImageNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="RecraftV4TextToImageNode",
display_name="Recraft V4 Text to Image",
category="api node/image/Recraft",
description="Generates images using Recraft V4 or V4 Pro models.",
inputs=[
IO.String.Input(
"prompt",
multiline=True,
tooltip="Prompt for the image generation. Maximum 10,000 characters.",
),
IO.String.Input(
"negative_prompt",
multiline=True,
tooltip="An optional text description of undesired elements on an image.",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"recraftv4",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_SIZES,
default="1024x1024",
tooltip="The size of the generated image.",
),
],
),
IO.DynamicCombo.Option(
"recraftv4_pro",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_PRO_SIZES,
default="2048x2048",
tooltip="The size of the generated image.",
),
],
),
],
tooltip="The model to use for generation.",
),
IO.Int.Input(
"n",
default=1,
min=1,
max=6,
tooltip="The number of images to generate.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="Seed to determine if node should re-run; "
"actual results are nondeterministic regardless of seed.",
),
IO.Custom(RecraftIO.CONTROLS).Input(
"recraft_controls",
tooltip="Optional additional controls over the generation via the Recraft Controls node.",
optional=True,
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "n"]),
expr="""
(
$prices := {"recraftv4": 0.04, "recraftv4_pro": 0.25};
{"type":"usd","usd": $lookup($prices, widgets.model) * widgets.n}
)
""",
),
)
@classmethod
async def execute(
cls,
prompt: str,
negative_prompt: str,
model: dict,
n: int,
seed: int,
recraft_controls: RecraftControls | None = None,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False, min_length=1, max_length=10000)
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"),
response_model=RecraftImageGenerationResponse,
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else None,
model=model["model"],
size=model["size"],
n=n,
controls=recraft_controls.create_api_model() if recraft_controls else None,
),
max_retries=1,
)
images = []
for data in response.data:
with handle_recraft_image_output():
image = bytesio_to_image_tensor(await download_url_as_bytesio(data.url, timeout=1024))
if len(image.shape) < 4:
image = image.unsqueeze(0)
images.append(image)
return IO.NodeOutput(torch.cat(images, dim=0))
class RecraftV4TextToVectorNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="RecraftV4TextToVectorNode",
display_name="Recraft V4 Text to Vector",
category="api node/image/Recraft",
description="Generates SVG using Recraft V4 or V4 Pro models.",
inputs=[
IO.String.Input(
"prompt",
multiline=True,
tooltip="Prompt for the image generation. Maximum 10,000 characters.",
),
IO.String.Input(
"negative_prompt",
multiline=True,
tooltip="An optional text description of undesired elements on an image.",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"recraftv4",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_SIZES,
default="1024x1024",
tooltip="The size of the generated image.",
),
],
),
IO.DynamicCombo.Option(
"recraftv4_pro",
[
IO.Combo.Input(
"size",
options=RECRAFT_V4_PRO_SIZES,
default="2048x2048",
tooltip="The size of the generated image.",
),
],
),
],
tooltip="The model to use for generation.",
),
IO.Int.Input(
"n",
default=1,
min=1,
max=6,
tooltip="The number of images to generate.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="Seed to determine if node should re-run; "
"actual results are nondeterministic regardless of seed.",
),
IO.Custom(RecraftIO.CONTROLS).Input(
"recraft_controls",
tooltip="Optional additional controls over the generation via the Recraft Controls node.",
optional=True,
),
],
outputs=[
IO.SVG.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "n"]),
expr="""
(
$prices := {"recraftv4": 0.08, "recraftv4_pro": 0.30};
{"type":"usd","usd": $lookup($prices, widgets.model) * widgets.n}
)
""",
),
)
@classmethod
async def execute(
cls,
prompt: str,
negative_prompt: str,
model: dict,
n: int,
seed: int,
recraft_controls: RecraftControls | None = None,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False, min_length=1, max_length=10000)
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"),
response_model=RecraftImageGenerationResponse,
data=RecraftImageGenerationRequest(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else None,
model=model["model"],
size=model["size"],
n=n,
style="vector_illustration",
substyle=None,
controls=recraft_controls.create_api_model() if recraft_controls else None,
),
max_retries=1,
)
svg_data = []
for data in response.data:
svg_data.append(await download_url_as_bytesio(data.url, timeout=1024))
return IO.NodeOutput(SVG(svg_data))
class RecraftExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -1098,6 +1344,8 @@ class RecraftExtension(ComfyExtension):
RecraftCreateStyleNode,
RecraftColorRGBNode,
RecraftControlsNode,
RecraftV4TextToImageNode,
RecraftV4TextToVectorNode,
]
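
Both new V4 nodes price as a flat per-image rate times n: 0.04 / 0.25 USD per image for recraftv4 / recraftv4_pro text-to-image, and 0.08 / 0.30 USD for text-to-vector. A Python rendering of the same JSONata lookup, for illustration only (the badge itself is evaluated from the expr string on the frontend):

T2I_PRICES = {"recraftv4": 0.04, "recraftv4_pro": 0.25}
T2V_PRICES = {"recraftv4": 0.08, "recraftv4_pro": 0.30}

def badge_usd(prices, model, n):
    # mirrors: $lookup($prices, widgets.model) * widgets.n
    return round(prices[model] * n, 2)

badge_usd(T2I_PRICES, "recraftv4_pro", 2)   # 0.5  -> "$0.50"
badge_usd(T2V_PRICES, "recraftv4", 4)       # 0.32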

View File

@@ -54,6 +54,7 @@ async def execute_task(
response_model=TaskStatusResponse,
status_extractor=lambda r: r.state,
progress_extractor=lambda r: r.progress,
price_extractor=lambda r: r.credits * 0.005 if r.credits is not None else None,
max_poll_attempts=max_poll_attempts,
)
if not response.creations:
@@ -1306,6 +1307,36 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
),
],
),
IO.DynamicCombo.Option(
"viduq3-turbo",
[
IO.Combo.Input(
"aspect_ratio",
options=["16:9", "9:16", "3:4", "4:3", "1:1"],
tooltip="The aspect ratio of the output video.",
),
IO.Combo.Input(
"resolution",
options=["720p", "1080p"],
tooltip="Resolution of the output video.",
),
IO.Int.Input(
"duration",
default=5,
min=1,
max=16,
step=1,
display_mode=IO.NumberDisplay.slider,
tooltip="Duration of the output video in seconds.",
),
IO.Boolean.Input(
"audio",
default=False,
tooltip="When enabled, outputs video with sound "
"(including dialogue and sound effects).",
),
],
),
],
tooltip="Model to use for video generation.",
),
@@ -1334,13 +1365,20 @@ class Vidu3TextToVideoNode(IO.ComfyNode):
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model.duration", "model.resolution"]),
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$base := $lookup({"720p": 0.075, "1080p": 0.1}, $res);
$perSec := $lookup({"720p": 0.025, "1080p": 0.05}, $res);
{"type":"usd","usd": $base + $perSec * ($lookup(widgets, "model.duration") - 1)}
$d := $lookup(widgets, "model.duration");
$contains(widgets.model, "turbo")
? (
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
{"type":"usd","usd": $rate * $d}
)
: (
$rate := $lookup({"720p": 0.15, "1080p": 0.16}, $res);
{"type":"usd","usd": $rate * $d}
)
)
""",
),
@@ -1409,6 +1447,31 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
),
],
),
IO.DynamicCombo.Option(
"viduq3-turbo",
[
IO.Combo.Input(
"resolution",
options=["720p", "1080p"],
tooltip="Resolution of the output video.",
),
IO.Int.Input(
"duration",
default=5,
min=1,
max=16,
step=1,
display_mode=IO.NumberDisplay.slider,
tooltip="Duration of the output video in seconds.",
),
IO.Boolean.Input(
"audio",
default=False,
tooltip="When enabled, outputs video with sound "
"(including dialogue and sound effects).",
),
],
),
],
tooltip="Model to use for video generation.",
),
@@ -1442,13 +1505,20 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model.duration", "model.resolution"]),
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$base := $lookup({"720p": 0.075, "1080p": 0.275, "2k": 0.35}, $res);
$perSec := $lookup({"720p": 0.05, "1080p": 0.075, "2k": 0.075}, $res);
{"type":"usd","usd": $base + $perSec * ($lookup(widgets, "model.duration") - 1)}
$d := $lookup(widgets, "model.duration");
$contains(widgets.model, "turbo")
? (
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
{"type":"usd","usd": $rate * $d}
)
: (
$rate := $lookup({"720p": 0.15, "1080p": 0.16, "2k": 0.2}, $res);
{"type":"usd","usd": $rate * $d}
)
)
""",
),
@@ -1481,6 +1551,145 @@ class Vidu3ImageToVideoNode(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_video_output(results[0].url))
class Vidu3StartEndToVideoNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="Vidu3StartEndToVideoNode",
display_name="Vidu Q3 Start/End Frame-to-Video Generation",
category="api node/video/Vidu",
description="Generate a video from a start frame, an end frame, and a prompt.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"viduq3-pro",
[
IO.Combo.Input(
"resolution",
options=["720p", "1080p"],
tooltip="Resolution of the output video.",
),
IO.Int.Input(
"duration",
default=5,
min=1,
max=16,
step=1,
display_mode=IO.NumberDisplay.slider,
tooltip="Duration of the output video in seconds.",
),
IO.Boolean.Input(
"audio",
default=False,
tooltip="When enabled, outputs video with sound "
"(including dialogue and sound effects).",
),
],
),
IO.DynamicCombo.Option(
"viduq3-turbo",
[
IO.Combo.Input(
"resolution",
options=["720p", "1080p"],
tooltip="Resolution of the output video.",
),
IO.Int.Input(
"duration",
default=5,
min=1,
max=16,
step=1,
display_mode=IO.NumberDisplay.slider,
tooltip="Duration of the output video in seconds.",
),
IO.Boolean.Input(
"audio",
default=False,
tooltip="When enabled, outputs video with sound "
"(including dialogue and sound effects).",
),
],
),
],
tooltip="Model to use for video generation.",
),
IO.Image.Input("first_frame"),
IO.Image.Input("end_frame"),
IO.String.Input(
"prompt",
multiline=True,
tooltip="Prompt description (max 2000 characters).",
),
IO.Int.Input(
"seed",
default=1,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.duration", "model.resolution"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$d := $lookup(widgets, "model.duration");
$contains(widgets.model, "turbo")
? (
$rate := $lookup({"720p": 0.06, "1080p": 0.08}, $res);
{"type":"usd","usd": $rate * $d}
)
: (
$rate := $lookup({"720p": 0.15, "1080p": 0.16}, $res);
{"type":"usd","usd": $rate * $d}
)
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
first_frame: Input.Image,
end_frame: Input.Image,
prompt: str,
seed: int,
) -> IO.NodeOutput:
validate_string(prompt, max_length=2000)
validate_images_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False)
payload = TaskCreationRequest(
model=model["model"],
prompt=prompt,
duration=model["duration"],
seed=seed,
resolution=model["resolution"],
audio=model["audio"],
images=[
(await upload_images_to_comfyapi(cls, frame, max_images=1, mime_type="image/png"))[0]
for frame in (first_frame, end_frame)
],
)
results = await execute_task(cls, VIDU_START_END_VIDEO, payload)
return IO.NodeOutput(await download_url_to_video_output(results[0].url))
class ViduExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -1497,6 +1706,7 @@ class ViduExtension(ComfyExtension):
ViduMultiFrameVideoNode,
Vidu3TextToVideoNode,
Vidu3ImageToVideoNode,
Vidu3StartEndToVideoNode,
]
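
Two pricing changes land in this file: execute_task now extracts the actual charge from the credits reported while polling, at 0.005 USD per credit, and the Q3 price badges switch from a base-plus-per-second formula to a flat per-second rate chosen by whether the selected model name contains "turbo". An illustrative Python version of the text-to-video badge (rates copied from the expr above; the image-to-video variant additionally has a 0.2 USD/s "2k" tier for non-turbo models):

def vidu_t2v_badge_usd(model, resolution, duration):
    # mirrors the $contains(widgets.model, "turbo") branch in the JSONata expr
    if "turbo" in model:
        rate = {"720p": 0.06, "1080p": 0.08}[resolution]
    else:
        rate = {"720p": 0.15, "1080p": 0.16}[resolution]
    return rate * duration

vidu_t2v_badge_usd("viduq3-turbo", "1080p", 5)   # 0.08 * 5 = $0.40
vidu_t2v_badge_usd("viduq3-pro", "720p", 8)      # 0.15 * 8 = $1.20

def price_from_credits(credits):
    # mirrors the new price_extractor: credits are billed at $0.005 each
    return credits * 0.005 if credits is not None else None   # 100 credits -> $0.50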

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.14.0"
__version__ = "0.14.1"

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.14.0"
version = "0.14.1"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@@ -1,5 +1,5 @@
comfyui-frontend-package==1.38.14
comfyui-workflow-templates==0.8.42
comfyui-frontend-package==1.39.14
comfyui-workflow-templates==0.8.43
comfyui-embedded-docs==0.4.1
torch
torchsde