mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-11 18:50:03 +00:00
Compare commits
12 Commits
v0.11.0
...
pysssss/ba
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aaea976f36 | ||
|
|
cee092213e | ||
|
|
3da0e9c367 | ||
|
|
9fa8202620 | ||
|
|
b4438c9baf | ||
|
|
1711020904 | ||
|
|
d9b8567547 | ||
|
|
6c5f906bf2 | ||
|
|
4f5bd39b1c | ||
|
|
dcff27fe3f | ||
|
|
cc30293d65 | ||
|
|
866d863128 |
4
.github/workflows/test-build.yml
vendored
4
.github/workflows/test-build.yml
vendored
@@ -25,6 +25,10 @@ jobs:
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libx11-dev
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
|
||||
@@ -208,7 +208,7 @@ comfy install
|
||||
|
||||
## Manual Install (Windows, Linux)
|
||||
|
||||
Python 3.14 works but you may encounter issues with the torch compile node. The free threaded variant is still missing some dependencies.
|
||||
Python 3.14 works but some custom nodes may have issues. The free threaded variant works but some dependencies will enable the GIL so it's not fully supported.
|
||||
|
||||
Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12
|
||||
|
||||
|
||||
@@ -236,6 +236,8 @@ class ComfyNodeABC(ABC):
|
||||
"""Flags a node as experimental, informing users that it may change or not work as expected."""
|
||||
DEPRECATED: bool
|
||||
"""Flags a node as deprecated, indicating to users that they should find alternatives to this node."""
|
||||
DEV_ONLY: bool
|
||||
"""Flags a node as dev-only, hiding it from search/menus unless dev mode is enabled."""
|
||||
API_NODE: Optional[bool]
|
||||
"""Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview."""
|
||||
|
||||
|
||||
@@ -1247,6 +1247,7 @@ class NodeInfoV1:
|
||||
output_node: bool=None
|
||||
deprecated: bool=None
|
||||
experimental: bool=None
|
||||
dev_only: bool=None
|
||||
api_node: bool=None
|
||||
price_badge: dict | None = None
|
||||
search_aliases: list[str]=None
|
||||
@@ -1264,6 +1265,7 @@ class NodeInfoV3:
|
||||
output_node: bool=None
|
||||
deprecated: bool=None
|
||||
experimental: bool=None
|
||||
dev_only: bool=None
|
||||
api_node: bool=None
|
||||
price_badge: dict | None = None
|
||||
|
||||
@@ -1375,6 +1377,8 @@ class Schema:
|
||||
"""Flags a node as deprecated, indicating to users that they should find alternatives to this node."""
|
||||
is_experimental: bool=False
|
||||
"""Flags a node as experimental, informing users that it may change or not work as expected."""
|
||||
is_dev_only: bool=False
|
||||
"""Flags a node as dev-only, hiding it from search/menus unless dev mode is enabled."""
|
||||
is_api_node: bool=False
|
||||
"""Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview."""
|
||||
price_badge: PriceBadge | None = None
|
||||
@@ -1485,6 +1489,7 @@ class Schema:
|
||||
output_node=self.is_output_node,
|
||||
deprecated=self.is_deprecated,
|
||||
experimental=self.is_experimental,
|
||||
dev_only=self.is_dev_only,
|
||||
api_node=self.is_api_node,
|
||||
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
|
||||
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
|
||||
@@ -1519,6 +1524,7 @@ class Schema:
|
||||
output_node=self.is_output_node,
|
||||
deprecated=self.is_deprecated,
|
||||
experimental=self.is_experimental,
|
||||
dev_only=self.is_dev_only,
|
||||
api_node=self.is_api_node,
|
||||
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
|
||||
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
|
||||
@@ -1791,6 +1797,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
|
||||
cls.GET_SCHEMA()
|
||||
return cls._DEPRECATED
|
||||
|
||||
_DEV_ONLY = None
|
||||
@final
|
||||
@classproperty
|
||||
def DEV_ONLY(cls): # noqa
|
||||
if cls._DEV_ONLY is None:
|
||||
cls.GET_SCHEMA()
|
||||
return cls._DEV_ONLY
|
||||
|
||||
_API_NODE = None
|
||||
@final
|
||||
@classproperty
|
||||
@@ -1893,6 +1907,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
|
||||
cls._EXPERIMENTAL = schema.is_experimental
|
||||
if cls._DEPRECATED is None:
|
||||
cls._DEPRECATED = schema.is_deprecated
|
||||
if cls._DEV_ONLY is None:
|
||||
cls._DEV_ONLY = schema.is_dev_only
|
||||
if cls._API_NODE is None:
|
||||
cls._API_NODE = schema.is_api_node
|
||||
if cls._OUTPUT_NODE is None:
|
||||
|
||||
67
comfy_api_nodes/apis/grok.py
Normal file
67
comfy_api_nodes/apis/grok.py
Normal file
@@ -0,0 +1,67 @@
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ImageGenerationRequest(BaseModel):
    """Payload for POST /v1/images/generations (Grok image model)."""

    model: str
    prompt: str
    aspect_ratio: str
    n: int  # number of images to generate
    seed: int
    # Ask the API for URLs rather than inline base64 payloads.
    response_for: str = "url"
|
||||
|
||||
|
||||
class InputUrlObject(BaseModel):
    """Wrapper for a media reference sent by URL (or data URI)."""

    url: str
|
||||
|
||||
|
||||
class ImageEditRequest(BaseModel):
    """Payload for POST /v1/images/edits (Grok image model)."""

    model: str
    image: InputUrlObject  # source image to edit
    prompt: str
    resolution: str
    n: int  # number of edited variants to generate
    seed: int
    # Ask the API for URLs rather than inline base64 payloads.
    response_for: str = "url"
|
||||
|
||||
|
||||
class VideoGenerationRequest(BaseModel):
    """Payload for POST /v1/videos/generations (Grok video model)."""

    model: str
    prompt: str
    image: InputUrlObject | None  # optional reference frame; explicit None means text-only
    duration: int  # seconds
    aspect_ratio: str | None  # None lets the service pick
    resolution: str
    seed: int
|
||||
|
||||
|
||||
class VideoEditRequest(BaseModel):
    """Payload for POST /v1/videos/edits (Grok video model)."""

    model: str
    prompt: str
    video: InputUrlObject  # source video, referenced by URL
    seed: int
|
||||
|
||||
|
||||
class ImageResponseObject(BaseModel):
    """One generated image; the API fills either `url` or `b64_json`."""

    url: str | None = None
    b64_json: str | None = None
    revised_prompt: str | None = None  # prompt as rewritten by the service, if any
|
||||
|
||||
|
||||
class ImageGenerationResponse(BaseModel):
    """Response envelope for image generation/edit calls."""

    data: list[ImageResponseObject]
|
||||
|
||||
|
||||
class VideoGenerationResponse(BaseModel):
    """Initial response to a video job submission; carries the id to poll."""

    request_id: str
|
||||
|
||||
|
||||
class VideoResponseObject(BaseModel):
    """A finished video as reported by the status endpoint."""

    url: str
    upsampled_prompt: str | None = None  # prompt as expanded by the service, if any
    duration: int  # seconds
|
||||
|
||||
|
||||
class VideoStatusResponse(BaseModel):
    """Polling response for a video job; `video` is set once the job finishes."""

    status: str | None = None
    video: VideoResponseObject | None = None
    model: str | None = None
|
||||
417
comfy_api_nodes/nodes_grok.py
Normal file
417
comfy_api_nodes/nodes_grok.py
Normal file
@@ -0,0 +1,417 @@
|
||||
import torch
|
||||
from typing_extensions import override
|
||||
|
||||
from comfy_api.latest import IO, ComfyExtension, Input
|
||||
from comfy_api_nodes.apis.grok import (
|
||||
ImageEditRequest,
|
||||
ImageGenerationRequest,
|
||||
ImageGenerationResponse,
|
||||
InputUrlObject,
|
||||
VideoEditRequest,
|
||||
VideoGenerationRequest,
|
||||
VideoGenerationResponse,
|
||||
VideoStatusResponse,
|
||||
)
|
||||
from comfy_api_nodes.util import (
|
||||
ApiEndpoint,
|
||||
download_url_to_image_tensor,
|
||||
download_url_to_video_output,
|
||||
get_fs_object_size,
|
||||
get_number_of_images,
|
||||
poll_op,
|
||||
sync_op,
|
||||
tensor_to_base64_string,
|
||||
upload_video_to_comfyapi,
|
||||
validate_string,
|
||||
validate_video_duration,
|
||||
)
|
||||
|
||||
|
||||
class GrokImageNode(IO.ComfyNode):
    """Generate images with xAI's Grok image model from a text prompt."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokImageNode",
            display_name="Grok Image",
            category="api node/image/Grok",
            description="Generate images using Grok based on a text prompt",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="The text prompt used to generate the image",
                ),
                IO.Combo.Input(
                    "aspect_ratio",
                    options=[
                        "1:1",
                        "2:3",
                        "3:2",
                        "3:4",
                        "4:3",
                        "9:16",
                        "16:9",
                        "9:19.5",
                        "19.5:9",
                        "9:20",
                        "20:9",
                        "1:2",
                        "2:1",
                    ],
                ),
                IO.Int.Input(
                    "number_of_images",
                    default=1,
                    min=1,
                    max=10,
                    step=1,
                    tooltip="Number of images to generate",
                    display_mode=IO.NumberDisplay.number,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["number_of_images"]),
                expr="""{"type":"usd","usd":0.033 * widgets.number_of_images}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        aspect_ratio: str,
        number_of_images: int,
        seed: int,
    ) -> IO.NodeOutput:
        """Request `number_of_images` generations and return them batched on dim 0.

        Raises:
            ValueError: if the prompt is empty or the API returns no usable image URLs.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/images/generations", method="POST"),
            data=ImageGenerationRequest(
                model=model,
                prompt=prompt,
                aspect_ratio=aspect_ratio,
                n=number_of_images,
                seed=seed,
            ),
            response_model=ImageGenerationResponse,
        )
        # Entries may lack `url` (the schema also allows b64_json payloads).
        # Filter consistently and fail with a clear error instead of passing
        # None to the downloader (the previous single-image path did not guard).
        urls = [str(d.url) for d in response.data if d.url]
        if not urls:
            raise ValueError("Grok API response contained no image URLs.")
        images = [await download_url_to_image_tensor(u) for u in urls]
        if len(images) == 1:
            return IO.NodeOutput(images[0])
        return IO.NodeOutput(torch.cat(images))
|
||||
|
||||
|
||||
class GrokImageEditNode(IO.ComfyNode):
    """Edit an existing image with xAI's Grok image model, guided by a text prompt."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokImageEditNode",
            display_name="Grok Image Edit",
            category="api node/image/Grok",
            description="Modify an existing image based on a text prompt",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.Image.Input("image"),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="The text prompt used to generate the image",
                ),
                IO.Combo.Input("resolution", options=["1K"]),
                IO.Int.Input(
                    "number_of_images",
                    default=1,
                    min=1,
                    max=10,
                    step=1,
                    tooltip="Number of edited images to generate",
                    display_mode=IO.NumberDisplay.number,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["number_of_images"]),
                expr="""{"type":"usd","usd":0.002 + 0.033 * widgets.number_of_images}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        prompt: str,
        resolution: str,
        number_of_images: int,
        seed: int,
    ) -> IO.NodeOutput:
        """Submit one source image for editing and return the edited image(s) batched on dim 0.

        Raises:
            ValueError: if the prompt is empty, more than one input image is
                supplied, or the API returns no usable image URLs.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)
        if get_number_of_images(image) != 1:
            raise ValueError("Only one input image is supported.")
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/images/edits", method="POST"),
            data=ImageEditRequest(
                model=model,
                # The source image is sent inline as a base64 data URI.
                image=InputUrlObject(url=f"data:image/png;base64,{tensor_to_base64_string(image)}"),
                prompt=prompt,
                # UI shows "1K"; the API expects lowercase ("1k").
                resolution=resolution.lower(),
                n=number_of_images,
                seed=seed,
            ),
            response_model=ImageGenerationResponse,
        )
        # Entries may lack `url` (the schema also allows b64_json payloads).
        # Filter consistently and fail with a clear error instead of passing
        # None to the downloader (the previous single-image path did not guard).
        urls = [str(d.url) for d in response.data if d.url]
        if not urls:
            raise ValueError("Grok API response contained no image URLs.")
        images = [await download_url_to_image_tensor(u) for u in urls]
        if len(images) == 1:
            return IO.NodeOutput(images[0])
        return IO.NodeOutput(torch.cat(images))
|
||||
|
||||
|
||||
class GrokVideoNode(IO.ComfyNode):
    """Generate a video with xAI's Grok video model from a prompt and optional reference image."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokVideoNode",
            display_name="Grok Video",
            category="api node/video/Grok",
            description="Generate video from a prompt or an image",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="Text description of the desired video.",
                ),
                IO.Combo.Input(
                    "resolution",
                    options=["480p", "720p"],
                    tooltip="The resolution of the output video.",
                ),
                IO.Combo.Input(
                    "aspect_ratio",
                    options=["auto", "16:9", "4:3", "3:2", "1:1", "2:3", "3:4", "9:16"],
                    tooltip="The aspect ratio of the output video.",
                ),
                IO.Int.Input(
                    "duration",
                    default=6,
                    min=1,
                    max=15,
                    step=1,
                    tooltip="The duration of the output video in seconds.",
                    display_mode=IO.NumberDisplay.slider,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
                IO.Image.Input("image", optional=True),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            # Price scales with duration; a connected reference image adds a flat fee.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["duration"], inputs=["image"]),
                expr="""
                (
                    $base := 0.181 * widgets.duration;
                    {"type":"usd","usd": inputs.image.connected ? $base + 0.002 : $base}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        resolution: str,
        aspect_ratio: str,
        duration: int,
        seed: int,
        image: Input.Image | None = None,
    ) -> IO.NodeOutput:
        """Submit a video generation job, poll until it finishes, and return the video.

        Raises:
            ValueError: if the prompt is empty or more than one reference image is supplied.
        """
        image_url = None
        if image is not None:
            if get_number_of_images(image) != 1:
                raise ValueError("Only one input image is supported.")
            # The reference frame is sent inline as a base64 data URI.
            image_url = InputUrlObject(url=f"data:image/png;base64,{tensor_to_base64_string(image)}")
        validate_string(prompt, strip_whitespace=True, min_length=1)
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/videos/generations", method="POST"),
            data=VideoGenerationRequest(
                model=model,
                image=image_url,
                prompt=prompt,
                resolution=resolution,
                duration=duration,
                # "auto" is a UI-only choice; it is sent as no explicit aspect ratio.
                aspect_ratio=None if aspect_ratio == "auto" else aspect_ratio,
                seed=seed,
            ),
            response_model=VideoGenerationResponse,
        )
        # Poll the job by request id; a missing status is treated as completion.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/xai/v1/videos/{initial_response.request_id}"),
            status_extractor=lambda r: r.status if r.status is not None else "complete",
            response_model=VideoStatusResponse,
        )
        # NOTE(review): assumes `response.video` is populated once polling ends —
        # confirm poll_op raises on failed jobs rather than returning here.
        return IO.NodeOutput(await download_url_to_video_output(response.video.url))
|
||||
|
||||
|
||||
class GrokVideoEditNode(IO.ComfyNode):
    """Edit an existing video with xAI's Grok video model, guided by a text prompt."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokVideoEditNode",
            display_name="Grok Video Edit",
            category="api node/video/Grok",
            description="Edit an existing video based on a text prompt.",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="Text description of the desired video.",
                ),
                IO.Video.Input("video", tooltip="Maximum supported duration is 8.7 seconds and 50MB file size."),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd": 0.191, "format": {"suffix": "/sec", "approximate": true}}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        video: Input.Video,
        seed: int,
    ) -> IO.NodeOutput:
        """Upload the source video, submit an edit job, poll until done, and return the result.

        Raises:
            ValueError: if the prompt is empty, the video falls outside the
                1-8.7 s duration window, or it exceeds the 50MB size limit.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)
        validate_video_duration(video, min_duration=1, max_duration=8.7)
        # Enforce the 50MB limit locally before doing any network work.
        video_stream = video.get_stream_source()
        video_size = get_fs_object_size(video_stream)
        if video_size > 50 * 1024 * 1024:
            raise ValueError(f"Video size ({video_size / 1024 / 1024:.1f}MB) exceeds 50MB limit.")
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/videos/edits", method="POST"),
            data=VideoEditRequest(
                model=model,
                # The source video is uploaded to Comfy API storage and
                # referenced by URL rather than sent inline.
                video=InputUrlObject(url=await upload_video_to_comfyapi(cls, video)),
                prompt=prompt,
                seed=seed,
            ),
            response_model=VideoGenerationResponse,
        )
        # Poll the job by request id; a missing status is treated as completion.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/xai/v1/videos/{initial_response.request_id}"),
            status_extractor=lambda r: r.status if r.status is not None else "complete",
            response_model=VideoStatusResponse,
        )
        # NOTE(review): assumes `response.video` is populated on completion —
        # confirm poll_op raises for failed jobs.
        return IO.NodeOutput(await download_url_to_video_output(response.video.url))
|
||||
|
||||
|
||||
class GrokExtension(ComfyExtension):
    """Registers the Grok API nodes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        node_classes = [GrokImageNode, GrokImageEditNode, GrokVideoNode, GrokVideoEditNode]
        return node_classes
|
||||
|
||||
|
||||
async def comfy_entrypoint() -> GrokExtension:
    """Entry point ComfyUI calls to load this extension."""
    extension = GrokExtension()
    return extension
|
||||
439
comfy_extras/nodes_glsl.py
Normal file
439
comfy_extras/nodes_glsl.py
Normal file
@@ -0,0 +1,439 @@
|
||||
import os
|
||||
import re
|
||||
import logging
|
||||
from contextlib import contextmanager
|
||||
from typing import TypedDict, Generator
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
import nodes
|
||||
from comfy_api.latest import ComfyExtension, io, ui
|
||||
from comfy.cli_args import args
|
||||
from typing_extensions import override
|
||||
from utils.install_util import get_missing_requirements_message
|
||||
|
||||
|
||||
class SizeModeInput(TypedDict):
    """Resolved value of the GLSLShader "size_mode" DynamicCombo input.

    `size_mode` selects the branch; `width`/`height` are read only when the
    "custom" branch is selected (see GLSLShader.execute).
    """

    size_mode: str
    width: int
    height: int
||||
|
||||
|
||||
MAX_IMAGES = 5 # u_image0-4
|
||||
MAX_UNIFORMS = 5 # u_float0-4, u_int0-4
|
||||
MAX_OUTPUTS = 4 # fragColor0-3 (MRT)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
import moderngl
|
||||
except ImportError as e:
|
||||
raise RuntimeError(f"ModernGL is not available.\n{get_missing_requirements_message()}") from e
|
||||
|
||||
# Default NOOP fragment shader that passes through the input image unchanged
|
||||
# For multiple outputs, use: layout(location = 0) out vec4 fragColor0; etc.
|
||||
DEFAULT_FRAGMENT_SHADER = """#version 300 es
|
||||
precision highp float;
|
||||
|
||||
uniform sampler2D u_image0;
|
||||
uniform vec2 u_resolution;
|
||||
|
||||
in vec2 v_texCoord;
|
||||
layout(location = 0) out vec4 fragColor0;
|
||||
|
||||
void main() {
|
||||
fragColor0 = texture(u_image0, v_texCoord);
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
# Simple vertex shader for full-screen quad
|
||||
VERTEX_SHADER = """#version 330
|
||||
|
||||
in vec2 in_position;
|
||||
in vec2 in_texcoord;
|
||||
|
||||
out vec2 v_texCoord;
|
||||
|
||||
void main() {
|
||||
gl_Position = vec4(in_position, 0.0, 1.0);
|
||||
v_texCoord = in_texcoord;
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def _convert_es_to_desktop_glsl(source: str) -> str:
|
||||
"""Convert GLSL ES 3.00 shader to desktop GLSL 3.30 for ModernGL compatibility."""
|
||||
return re.sub(r'#version\s+300\s+es', '#version 330', source)
|
||||
|
||||
|
||||
def _create_software_gl_context() -> moderngl.Context:
    """Create a standalone OpenGL 3.3 context using software rendering.

    Temporarily sets LIBGL_ALWAYS_SOFTWARE=1 so a software rasterizer
    (e.g. Mesa/llvmpipe) is selected, then restores the variable's prior
    state whether or not context creation succeeds.
    """
    original_env = os.environ.get("LIBGL_ALWAYS_SOFTWARE")
    os.environ["LIBGL_ALWAYS_SOFTWARE"] = "1"
    try:
        ctx = moderngl.create_standalone_context(require=330)
        logger.info(f"Created software-rendered OpenGL context: {ctx.info['GL_RENDERER']}")
        return ctx
    finally:
        # Restore the environment exactly as it was before the call.
        if original_env is None:
            os.environ.pop("LIBGL_ALWAYS_SOFTWARE", None)
        else:
            os.environ["LIBGL_ALWAYS_SOFTWARE"] = original_env
|
||||
|
||||
|
||||
def _create_gl_context(force_software: bool = False) -> moderngl.Context:
    """Create a standalone OpenGL 3.3 context.

    Tries hardware rendering first and falls back to software rendering,
    unless `force_software` is True, in which case only software rendering
    is attempted.

    Raises:
        RuntimeError: if no context could be created; the message includes
            remediation hints (drivers, Mesa, headless setup).
    """
    if force_software:
        try:
            return _create_software_gl_context()
        except Exception as e:
            raise RuntimeError(
                "Failed to create software-rendered OpenGL context.\n"
                "Ensure Mesa/llvmpipe is installed for software rendering support."
            ) from e

    # Try hardware rendering first, fall back to software
    try:
        ctx = moderngl.create_standalone_context(require=330)
        logger.info(f"Created OpenGL context: {ctx.info['GL_RENDERER']}")
        return ctx
    except Exception as hw_error:
        logger.warning(f"Hardware OpenGL context creation failed: {hw_error}")
        logger.info("Attempting software rendering fallback...")
        try:
            return _create_software_gl_context()
        except Exception as sw_error:
            # Chain from the software error but report the hardware error too,
            # since either could be the one the user needs to fix.
            raise RuntimeError(
                f"Failed to create OpenGL context.\n"
                f"Hardware error: {hw_error}\n\n"
                f"Possible solutions:\n"
                f"1. Install GPU drivers with OpenGL 3.3+ support\n"
                f"2. Install Mesa for software rendering (Linux: apt install libgl1-mesa-dri)\n"
                f"3. On headless servers, ensure virtual framebuffer (Xvfb) or EGL is available"
            ) from sw_error
|
||||
|
||||
|
||||
def _image_to_texture(ctx: moderngl.Context, image: np.ndarray) -> moderngl.Texture:
    """Upload a float image (H, W, C) with values in [0, 1] as an 8-bit GL texture.

    The image is flipped vertically because OpenGL's origin is bottom-left
    while the array's origin is top-left. Sampling uses linear filtering
    with edge clamping (no wrap).
    """
    height, width = image.shape[:2]
    channels = image.shape[2] if len(image.shape) > 2 else 1

    # GL color textures support at most 4 components (RGBA).
    # NOTE(review): a >4-channel input would make the byte buffer larger than
    # `components` implies — presumably inputs are 1-4 channels; confirm upstream.
    components = min(channels, 4)

    image_uint8 = (np.clip(image, 0, 1) * 255).astype(np.uint8)

    # Flip vertically for OpenGL coordinate system (origin at bottom-left)
    image_uint8 = np.ascontiguousarray(np.flipud(image_uint8))

    texture = ctx.texture((width, height), components, image_uint8.tobytes())
    texture.filter = (moderngl.LINEAR, moderngl.LINEAR)
    texture.repeat_x = False
    texture.repeat_y = False

    return texture
|
||||
|
||||
|
||||
def _texture_to_image(fbo: moderngl.Framebuffer, attachment: int = 0, channels: int = 4) -> np.ndarray:
    """Read one color attachment of `fbo` back as a float32 (H, W, channels) image in [0, 1].

    Undoes the vertical flip applied on upload (OpenGL's origin is bottom-left).
    """
    width, height = fbo.size

    data = fbo.read(components=channels, attachment=attachment)
    image = np.frombuffer(data, dtype=np.uint8).reshape((height, width, channels))

    # Flip back to top-left origin; copy so the buffer is writable/contiguous.
    image = np.ascontiguousarray(np.flipud(image))

    return image.astype(np.float32) / 255.0
|
||||
|
||||
|
||||
def _compile_shader(ctx: moderngl.Context, fragment_source: str) -> moderngl.Program:
    """Compile the user's fragment shader together with the built-in quad vertex shader.

    The fragment source is first retargeted from GLSL ES 3.00 to desktop
    GLSL 3.30 so WebGL-2-compatible shaders work under ModernGL.

    Raises:
        RuntimeError: if compilation/linking fails; the message lists the
            common shader authoring requirements.
    """
    # Convert user's GLSL ES 3.00 fragment shader to desktop GLSL 3.30 for ModernGL
    fragment_source = _convert_es_to_desktop_glsl(fragment_source)

    try:
        program = ctx.program(
            vertex_shader=VERTEX_SHADER,
            fragment_shader=fragment_source,
        )
        return program
    except Exception as e:
        raise RuntimeError(
            "Fragment shader compilation failed.\n\n"
            "Make sure your shader:\n"
            "1. Uses #version 300 es (WebGL 2.0 compatible)\n"
            "2. Has valid GLSL ES 3.00 syntax\n"
            "3. Includes 'precision highp float;' after version\n"
            "4. Uses 'out vec4 fragColor' instead of gl_FragColor\n"
            "5. Declares uniforms correctly (e.g., uniform sampler2D u_image0;)"
        ) from e
|
||||
|
||||
|
||||
def _render_shader(
    ctx: moderngl.Context,
    program: moderngl.Program,
    width: int,
    height: int,
    textures: list[moderngl.Texture],
    uniforms: dict[str, int | float],
) -> list[np.ndarray]:
    """Render one full-screen quad through `program` and read back all MAX_OUTPUTS targets.

    Input textures are bound to units 0..n and wired to `u_image{i}` samplers;
    `u_resolution` and the entries of `uniforms` are set when the program
    actually declares them (unused uniforms are silently skipped).

    Returns:
        One float32 RGBA image per render target, in attachment order.
    """
    # Create output textures
    # NOTE(review): if framebuffer/VAO creation below raises, these textures
    # (and the fbo) leak — the finally block only runs once the try is entered.
    output_textures = []
    for _ in range(MAX_OUTPUTS):
        tex = ctx.texture((width, height), 4)
        tex.filter = (moderngl.LINEAR, moderngl.LINEAR)
        output_textures.append(tex)

    fbo = ctx.framebuffer(color_attachments=output_textures)

    # Full-screen quad vertices (position + texcoord)
    vertices = np.array([
        # Position (x, y), Texcoord (u, v)
        -1.0, -1.0, 0.0, 0.0,
        1.0, -1.0, 1.0, 0.0,
        -1.0, 1.0, 0.0, 1.0,
        1.0, 1.0, 1.0, 1.0,
    ], dtype='f4')

    vbo = ctx.buffer(vertices.tobytes())
    vao = ctx.vertex_array(
        program,
        [(vbo, '2f 2f', 'in_position', 'in_texcoord')],
    )

    try:
        # Bind textures
        for i, texture in enumerate(textures):
            texture.use(i)
            uniform_name = f'u_image{i}'
            if uniform_name in program:
                program[uniform_name].value = i

        # Set uniforms
        if 'u_resolution' in program:
            program['u_resolution'].value = (float(width), float(height))

        for name, value in uniforms.items():
            if name in program:
                program[name].value = value

        # Render
        fbo.use()
        fbo.clear(0.0, 0.0, 0.0, 1.0)
        vao.render(moderngl.TRIANGLE_STRIP)

        # Read results from all attachments
        results = []
        for i in range(MAX_OUTPUTS):
            results.append(_texture_to_image(fbo, attachment=i, channels=4))
        return results
    finally:
        # Release GL resources in reverse-ish order of creation; the CPU-side
        # numpy results remain valid after release.
        vao.release()
        vbo.release()
        for tex in output_textures:
            tex.release()
        fbo.release()
|
||||
|
||||
|
||||
def _prepare_textures(
    ctx: moderngl.Context,
    image_list: list[torch.Tensor],
    batch_idx: int,
) -> list[moderngl.Texture]:
    """Upload one frame from each input batch (capped at MAX_IMAGES) as GL textures.

    Batches shorter than `batch_idx` contribute their last frame, so a single
    image can be reused across a longer batch.
    """
    return [
        _image_to_texture(ctx, tensor[min(batch_idx, tensor.shape[0] - 1)].cpu().numpy())
        for tensor in image_list[:MAX_IMAGES]
    ]
|
||||
|
||||
|
||||
def _prepare_uniforms(int_list: list[int], float_list: list[float]) -> dict[str, int | float]:
    """Map the first MAX_UNIFORMS ints and floats to their shader uniform names.

    Produces ``u_int0..u_int{n-1}`` and ``u_float0..u_float{n-1}`` entries;
    extra values beyond MAX_UNIFORMS are silently ignored.
    """
    int_part = {f'u_int{i}': int(v) for i, v in enumerate(int_list[:MAX_UNIFORMS])}
    float_part = {f'u_float{i}': float(v) for i, v in enumerate(float_list[:MAX_UNIFORMS])}
    return {**int_part, **float_part}
|
||||
|
||||
|
||||
def _release_textures(textures: list[moderngl.Texture]) -> None:
    """Free the GL resources held by each texture in the list."""
    for tex in textures:
        tex.release()
|
||||
|
||||
|
||||
@contextmanager
def _gl_context(force_software: bool = False) -> Generator[moderngl.Context, None, None]:
    """Context manager yielding an OpenGL context; releases it on exit."""
    ctx = _create_gl_context(force_software)
    try:
        yield ctx
    finally:
        ctx.release()
|
||||
|
||||
|
||||
@contextmanager
def _shader_program(ctx: moderngl.Context, fragment_source: str) -> Generator[moderngl.Program, None, None]:
    """Context manager yielding a compiled shader program; releases it on exit.

    Raises:
        RuntimeError: if the fragment shader fails to compile (see _compile_shader).
    """
    program = _compile_shader(ctx, fragment_source)
    try:
        yield program
    finally:
        program.release()
|
||||
|
||||
|
||||
@contextmanager
def _textures_context(
    ctx: moderngl.Context,
    image_list: list[torch.Tensor],
    batch_idx: int,
) -> Generator[list[moderngl.Texture], None, None]:
    """Context manager yielding the uploaded input textures for one batch index.

    All textures are released on exit, even if rendering raises.
    """
    textures = _prepare_textures(ctx, image_list, batch_idx)
    try:
        yield textures
    finally:
        _release_textures(textures)
|
||||
|
||||
|
||||
class GLSLShader(io.ComfyNode):
    """Run a user-supplied GLSL ES 3.00 fragment shader over input images.

    Each batch element is rendered as a full-screen quad with up to
    MAX_IMAGES input textures, MAX_UNIFORMS int/float uniforms, and
    MAX_OUTPUTS render targets; one image batch is returned per target.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        # Create autogrow templates
        image_template = io.Autogrow.TemplatePrefix(
            io.Image.Input("image"),
            prefix="image",
            min=1,
            max=MAX_IMAGES,
        )

        float_template = io.Autogrow.TemplatePrefix(
            io.Float.Input("float", default=0.0),
            prefix="u_float",
            min=0,
            max=MAX_UNIFORMS,
        )

        int_template = io.Autogrow.TemplatePrefix(
            io.Int.Input("int", default=0),
            prefix="u_int",
            min=0,
            max=MAX_UNIFORMS,
        )

        return io.Schema(
            node_id="GLSLShader",
            display_name="GLSL Shader",
            category="image/shader",
            description=(
                f"Apply GLSL fragment shaders to images. "
                f"Inputs: u_image0-{MAX_IMAGES-1} (sampler2D), u_resolution (vec2), "
                f"u_float0-{MAX_UNIFORMS-1}, u_int0-{MAX_UNIFORMS-1}. "
                f"Outputs: layout(location = 0-{MAX_OUTPUTS-1}) out vec4 fragColor0-{MAX_OUTPUTS-1}."
            ),
            inputs=[
                io.String.Input(
                    "fragment_shader",
                    default=DEFAULT_FRAGMENT_SHADER,
                    multiline=True,
                    tooltip="GLSL fragment shader source code (GLSL ES 3.00 / WebGL 2.0 compatible)",
                ),
                io.DynamicCombo.Input(
                    "size_mode",
                    options=[
                        io.DynamicCombo.Option(
                            "from_input",
                            [],  # No extra inputs - uses first input image dimensions
                        ),
                        io.DynamicCombo.Option(
                            "custom",
                            [
                                io.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION),
                                io.Int.Input("height", default=512, min=1, max=nodes.MAX_RESOLUTION),
                            ],
                        ),
                    ],
                    tooltip="Output size: 'from_input' uses first input image dimensions, 'custom' allows manual size",
                ),
                io.Autogrow.Input("images", template=image_template),
                io.Autogrow.Input("floats", template=float_template),
                io.Autogrow.Input("ints", template=int_template),
            ],
            outputs=[
                io.Image.Output(display_name="IMAGE0"),
                io.Image.Output(display_name="IMAGE1"),
                io.Image.Output(display_name="IMAGE2"),
                io.Image.Output(display_name="IMAGE3"),
            ],
        )

    @classmethod
    def execute(
        cls,
        fragment_shader: str,
        size_mode: SizeModeInput,
        images: io.Autogrow.Type,
        floats: io.Autogrow.Type = None,
        ints: io.Autogrow.Type = None,
        **kwargs,
    ) -> io.NodeOutput:
        """Render every batch element through the shader and return MAX_OUTPUTS image batches.

        Raises:
            ValueError: if no input image is connected.
            RuntimeError: if no GL context can be created or the shader fails to compile.
        """
        # Autogrow slots may be present but unconnected (None); drop/zero-fill them.
        image_list = [v for v in images.values() if v is not None]
        float_list = [v if v is not None else 0.0 for v in floats.values()] if floats else []
        int_list = [v if v is not None else 0 for v in ints.values()] if ints else []

        if not image_list:
            raise ValueError("At least one input image is required")

        # Determine output dimensions
        if size_mode["size_mode"] == "custom":
            out_width, out_height = size_mode["width"], size_mode["height"]
        else:
            # Image tensors are indexed (batch, height, width, ...).
            out_height, out_width = image_list[0].shape[1], image_list[0].shape[2]

        # The first input's batch size drives the render loop; shorter inputs
        # repeat their last frame (see _prepare_textures).
        batch_size = image_list[0].shape[0]
        uniforms = _prepare_uniforms(int_list, float_list)

        with _gl_context(force_software=args.cpu) as ctx:
            with _shader_program(ctx, fragment_shader) as program:
                # Collect outputs for each render target across all batches
                all_outputs: list[list[torch.Tensor]] = [[] for _ in range(MAX_OUTPUTS)]

                for b in range(batch_size):
                    with _textures_context(ctx, image_list, b) as textures:
                        results = _render_shader(ctx, program, out_width, out_height, textures, uniforms)
                        for i, result in enumerate(results):
                            all_outputs[i].append(torch.from_numpy(result))

                # Stack batches for each output
                output_values = []
                for i in range(MAX_OUTPUTS):
                    output_batch = torch.stack(all_outputs[i], dim=0)
                    output_values.append(output_batch)

        return io.NodeOutput(*output_values, ui=cls._build_ui_output(image_list, output_values[0]))

    @classmethod
    def _build_ui_output(cls, image_list: list[torch.Tensor], output_batch: torch.Tensor) -> dict[str, list]:
        """Build UI output with input and output images for client-side shader execution."""
        # Inputs are concatenated into one batch and saved to the temp folder
        # alongside the first render target's output.
        combined_inputs = torch.cat(image_list, dim=0)
        input_images_ui = ui.ImageSaveHelper.save_images(
            combined_inputs,
            filename_prefix="GLSLShader_input",
            folder_type=io.FolderType.temp,
            cls=None,
            compress_level=1,
        )

        output_images_ui = ui.ImageSaveHelper.save_images(
            output_batch,
            filename_prefix="GLSLShader_output",
            folder_type=io.FolderType.temp,
            cls=None,
            compress_level=1,
        )

        return {"input_images": input_images_ui, "images": output_images_ui}
|
||||
|
||||
|
||||
class GLSLExtension(ComfyExtension):
    """Registers the GLSL shader node with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        node_classes = [GLSLShader]
        return node_classes
|
||||
|
||||
|
||||
async def comfy_entrypoint() -> GLSLExtension:
    """Entry point ComfyUI calls to load this extension."""
    extension = GLSLExtension()
    return extension
|
||||
@@ -1 +1 @@
|
||||
comfyui_manager==4.0.5
|
||||
comfyui_manager==4.1b1
|
||||
|
||||
1
nodes.py
1
nodes.py
@@ -2432,6 +2432,7 @@ async def init_builtin_extra_nodes():
|
||||
"nodes_wanmove.py",
|
||||
"nodes_image_compare.py",
|
||||
"nodes_zimage.py",
|
||||
"nodes_glsl.py",
|
||||
"nodes_lora_debug.py"
|
||||
]
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
comfyui-frontend-package==1.37.11
|
||||
comfyui-workflow-templates==0.8.24
|
||||
comfyui-workflow-templates==0.8.27
|
||||
comfyui-embedded-docs==0.4.0
|
||||
torch
|
||||
torchsde
|
||||
@@ -29,3 +29,4 @@ kornia>=0.7.1
|
||||
spandrel
|
||||
pydantic~=2.0
|
||||
pydantic-settings~=2.0
|
||||
moderngl
|
||||
|
||||
@@ -679,6 +679,8 @@ class PromptServer():
|
||||
info['deprecated'] = True
|
||||
if getattr(obj_class, "EXPERIMENTAL", False):
|
||||
info['experimental'] = True
|
||||
if getattr(obj_class, "DEV_ONLY", False):
|
||||
info['dev_only'] = True
|
||||
|
||||
if hasattr(obj_class, 'API_NODE'):
|
||||
info['api_node'] = obj_class.API_NODE
|
||||
|
||||
Reference in New Issue
Block a user