remove nonsense

lllyasviel
2024-01-28 18:27:52 -08:00
parent 515f86ed31
commit 858481139b
4 changed files with 3 additions and 824 deletions


@@ -866,464 +866,3 @@ class HandRefinerModel:
g_hand_refiner_model = HandRefinerModel()
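# Preprocessors that work without loading any ControlNet model.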
model_free_preprocessors = [
    "reference_only",
    "reference_adain",
    "reference_adain+attn",
    "revision_clipvision",
    "revision_ignore_prompt"
]
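# Preprocessors for which the ControlNet control-mode choice
# (Balanced / prompt-vs-control importance) is not applicable.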
no_control_mode_preprocessors = [
    "revision_clipvision",
    "revision_ignore_prompt",
    "clip_vision",
    "ip-adapter_clip_sd15",
    "ip-adapter_clip_sdxl",
    "ip-adapter_clip_sdxl_plus_vith",
    "t2ia_style_clipvision",
    "ip-adapter_face_id",
    "ip-adapter_face_id_plus",
]
flag_preprocessor_resolution = "Preprocessor Resolution"
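# Slider layout per preprocessor: slot 0 is the resolution slider (or None
# when the preprocessor takes no resolution), followed by up to three extra
# sliders; an empty list means no sliders at all.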
preprocessor_sliders_config = {
    "none": [],
    "inpaint": [],
    "inpaint_only": [],
    "revision_clipvision": [
        None,
        {
            "name": "Noise Augmentation",
            "value": 0.0,
            "min": 0.0,
            "max": 1.0
        },
    ],
    "revision_ignore_prompt": [
        None,
        {
            "name": "Noise Augmentation",
            "value": 0.0,
            "min": 0.0,
            "max": 1.0
        },
    ],
    "canny": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        },
        {
            "name": "Canny Low Threshold",
            "value": 100,
            "min": 1,
            "max": 255
        },
        {
            "name": "Canny High Threshold",
            "value": 200,
            "min": 1,
            "max": 255
        },
    ],
    "mlsd": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        },
        {
            "name": "MLSD Value Threshold",
            "min": 0.01,
            "max": 2.0,
            "value": 0.1,
            "step": 0.01
        },
        {
            "name": "MLSD Distance Threshold",
            "min": 0.01,
            "max": 20.0,
            "value": 0.1,
            "step": 0.01
        }
    ],
    "hed": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "scribble_hed": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "hed_safe": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "openpose": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "openpose_full": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "dw_openpose_full": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "animal_openpose": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "segmentation": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "depth": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "depth_leres": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        },
        {
            "name": "Remove Near %",
            "min": 0,
            "max": 100,
            "value": 0,
            "step": 0.1,
        },
        {
            "name": "Remove Background %",
            "min": 0,
            "max": 100,
            "value": 0,
            "step": 0.1,
        }
    ],
    "depth_leres++": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        },
        {
            "name": "Remove Near %",
            "min": 0,
            "max": 100,
            "value": 0,
            "step": 0.1,
        },
        {
            "name": "Remove Background %",
            "min": 0,
            "max": 100,
            "value": 0,
            "step": 0.1,
        }
    ],
    "normal_map": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        },
        {
            "name": "Normal Background Threshold",
            "min": 0.0,
            "max": 1.0,
            "value": 0.4,
            "step": 0.01
        }
    ],
    "threshold": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        },
        {
            "name": "Binarization Threshold",
            "min": 0,
            "max": 255,
            "value": 127
        }
    ],
    "scribble_xdog": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        },
        {
            "name": "XDoG Threshold",
            "min": 1,
            "max": 64,
            "value": 32,
        }
    ],
    "blur_gaussian": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        },
        {
            "name": "Sigma",
            "min": 0.01,
            "max": 64.0,
            "value": 9.0,
        }
    ],
    "tile_resample": [
        None,
        {
            "name": "Down Sampling Rate",
            "value": 1.0,
            "min": 1.0,
            "max": 8.0,
            "step": 0.01
        }
    ],
    "tile_colorfix": [
        None,
        {
            "name": "Variation",
            "value": 8.0,
            "min": 3.0,
            "max": 32.0,
            "step": 1.0
        }
    ],
    "tile_colorfix+sharp": [
        None,
        {
            "name": "Variation",
            "value": 8.0,
            "min": 3.0,
            "max": 32.0,
            "step": 1.0
        },
        {
            "name": "Sharpness",
            "value": 1.0,
            "min": 0.0,
            "max": 2.0,
            "step": 0.01
        }
    ],
    "reference_only": [
        None,
        {
            "name": r'Style Fidelity (only for "Balanced" mode)',
            "value": 0.5,
            "min": 0.0,
            "max": 1.0,
            "step": 0.01
        }
    ],
    "reference_adain": [
        None,
        {
            "name": r'Style Fidelity (only for "Balanced" mode)',
            "value": 0.5,
            "min": 0.0,
            "max": 1.0,
            "step": 0.01
        }
    ],
    "reference_adain+attn": [
        None,
        {
            "name": r'Style Fidelity (only for "Balanced" mode)',
            "value": 0.5,
            "min": 0.0,
            "max": 1.0,
            "step": 0.01
        }
    ],
    "inpaint_only+lama": [],
    "color": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048,
        }
    ],
    "mediapipe_face": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048,
        },
        {
            "name": "Max Faces",
            "value": 1,
            "min": 1,
            "max": 10,
            "step": 1
        },
        {
            "name": "Min Face Confidence",
            "value": 0.5,
            "min": 0.01,
            "max": 1.0,
            "step": 0.01
        }
    ],
    "recolor_luminance": [
        None,
        {
            "name": "Gamma Correction",
            "value": 1.0,
            "min": 0.1,
            "max": 2.0,
            "step": 0.001
        }
    ],
    "recolor_intensity": [
        None,
        {
            "name": "Gamma Correction",
            "value": 1.0,
            "min": 0.1,
            "max": 2.0,
            "step": 0.001
        }
    ],
    "anime_face_segment": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        }
    ],
    "densepose": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "densepose_parula": [
        {
            "name": flag_preprocessor_resolution,
            "min": 64,
            "max": 2048,
            "value": 512
        }
    ],
    "depth_hand_refiner": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        }
    ],
    "te_hed": [
        {
            "name": flag_preprocessor_resolution,
            "value": 512,
            "min": 64,
            "max": 2048
        },
        {
            "name": "Safe Steps",
            "min": 0,
            "max": 10,
            "value": 2,
            "step": 1,
        },
    ],
}
preprocessor_filters = {
    "All": "none",
    "Canny": "canny",
    "Depth": "depth_midas",
    "NormalMap": "normal_bae",
    "OpenPose": "openpose_full",
    "MLSD": "mlsd",
    "Lineart": "lineart_standard (from white bg & black line)",
    "SoftEdge": "softedge_pidinet",
    "Scribble/Sketch": "scribble_pidinet",
    "Segmentation": "seg_ofade20k",
    "Shuffle": "shuffle",
    "Tile/Blur": "tile_resample",
    "Inpaint": "inpaint_only",
    "InstructP2P": "none",
    "Reference": "reference_only",
    "Recolor": "recolor_luminance",
    "Revision": "revision_clipvision",
    "T2I-Adapter": "none",
    "IP-Adapter": "ip-adapter_clip_sd15",
    "Instant_ID": "instant_id",
}
preprocessor_filters_aliases = {
    'instructp2p': ['ip2p'],
    'segmentation': ['seg'],
    'normalmap': ['normal'],
    't2i-adapter': ['t2i_adapter', 't2iadapter', 't2ia'],
    'ip-adapter': ['ip_adapter', 'ipadapter'],
    'scribble/sketch': ['scribble', 'sketch'],
    'tile/blur': ['tile', 'blur'],
    'openpose': ['openpose', 'densepose'],
}  # keys and aliases must be all-lowercase
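
The sliders table above is consumed positionally. A minimal sketch of a consumer, where visible_sliders is a hypothetical helper rather than anything in this commit:

def visible_sliders(name):
    # Slot 0 is the resolution slider (or None); the rest are extra sliders.
    config = preprocessor_sliders_config.get(name, [])
    resolution = config[0] if config else None
    extras = [s for s in config[1:] if s is not None]
    return resolution, extras

# visible_sliders("canny") -> (resolution slider, [low threshold, high threshold])
# visible_sliders("reference_only") -> (None, [style fidelity slider])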


@@ -1,150 +0,0 @@
import os.path
import stat
import functools
from collections import OrderedDict
from modules import shared, scripts, sd_models
from legacy_preprocessors.preprocessor import *
import legacy_preprocessors.preprocessor as processor
from typing import Dict, Callable, Optional, Tuple, List
cn_preprocessor_modules = {
    "none": lambda x, *args, **kwargs: (x, True),
    "canny": canny,
    "depth": midas,
    "depth_leres": functools.partial(leres, boost=False),
    "depth_leres++": functools.partial(leres, boost=True),
    "depth_hand_refiner": g_hand_refiner_model.run_model,
    "depth_anything": functools.partial(depth_anything, colored=False),
    "hed": hed,
    "hed_safe": hed_safe,
    "mediapipe_face": mediapipe_face,
    "mlsd": mlsd,
    "normal_map": midas_normal,
    "openpose": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=False),
    "openpose_hand": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=False),
    "openpose_face": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=True),
    "openpose_faceonly": functools.partial(g_openpose_model.run_model, include_body=False, include_hand=False, include_face=True),
    "openpose_full": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=True),
    "dw_openpose_full": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=True, use_dw_pose=True),
    "animal_openpose": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=False, use_animal_pose=True),
    "clip_vision": functools.partial(clip, config='clip_vitl'),
    "revision_clipvision": functools.partial(clip, config='clip_g'),
    "revision_ignore_prompt": functools.partial(clip, config='clip_g'),
    "ip-adapter_clip_sd15": functools.partial(clip, config='clip_h'),
    "ip-adapter_clip_sdxl_plus_vith": functools.partial(clip, config='clip_h'),
    "ip-adapter_clip_sdxl": functools.partial(clip, config='clip_g'),
    "ip-adapter_face_id": g_insight_face_model.run_model,
    "ip-adapter_face_id_plus": face_id_plus,
    "instant_id_face_keypoints": functools.partial(g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=True),
    "instant_id_face_embedding": functools.partial(g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=False),
    "color": color,
    "pidinet": pidinet,
    "pidinet_safe": pidinet_safe,
    "pidinet_sketch": pidinet_ts,
    "pidinet_scribble": scribble_pidinet,
    "scribble_xdog": scribble_xdog,
    "scribble_hed": scribble_hed,
    "segmentation": uniformer,
    "threshold": threshold,
    "depth_zoe": zoe_depth,
    "normal_bae": normal_bae,
    "oneformer_coco": oneformer_coco,
    "oneformer_ade20k": oneformer_ade20k,
    "lineart": lineart,
    "lineart_coarse": lineart_coarse,
    "lineart_anime": lineart_anime,
    "lineart_standard": lineart_standard,
    "shuffle": shuffle,
    "tile_resample": tile_resample,
    "invert": invert,
    "lineart_anime_denoise": lineart_anime_denoise,
    "reference_only": identity,
    "reference_adain": identity,
    "reference_adain+attn": identity,
    "inpaint": identity,
    "inpaint_only": identity,
    "inpaint_only+lama": lama_inpaint,
    "tile_colorfix": identity,
    "tile_colorfix+sharp": identity,
    "recolor_luminance": recolor_luminance,
    "recolor_intensity": recolor_intensity,
    "blur_gaussian": blur_gaussian,
    "anime_face_segment": anime_face_segment,
    "densepose": functools.partial(densepose, cmap="viridis"),
    "densepose_parula": functools.partial(densepose, cmap="parula"),
    "te_hed": te_hed,
}
cn_preprocessor_unloadable = {
    "hed": unload_hed,
    "fake_scribble": unload_hed,
    "mlsd": unload_mlsd,
    "clip_vision": functools.partial(unload_clip, config='clip_vitl'),
    "revision_clipvision": functools.partial(unload_clip, config='clip_g'),
    "revision_ignore_prompt": functools.partial(unload_clip, config='clip_g'),
    "ip-adapter_clip_sd15": functools.partial(unload_clip, config='clip_h'),
    "ip-adapter_clip_sdxl_plus_vith": functools.partial(unload_clip, config='clip_h'),
    "ip-adapter_face_id_plus": functools.partial(unload_clip, config='clip_h'),
    "ip-adapter_clip_sdxl": functools.partial(unload_clip, config='clip_g'),
    "depth": unload_midas,
    "depth_leres": unload_leres,
    "depth_anything": unload_depth_anything,
    "normal_map": unload_midas,
    "pidinet": unload_pidinet,
    "openpose": g_openpose_model.unload,
    "openpose_hand": g_openpose_model.unload,
    "openpose_face": g_openpose_model.unload,
    "openpose_full": g_openpose_model.unload,
    "dw_openpose_full": g_openpose_model.unload,
    "animal_openpose": g_openpose_model.unload,
    "segmentation": unload_uniformer,
    "depth_zoe": unload_zoe_depth,
    "normal_bae": unload_normal_bae,
    "oneformer_coco": unload_oneformer_coco,
    "oneformer_ade20k": unload_oneformer_ade20k,
    "lineart": unload_lineart,
    "lineart_coarse": unload_lineart_coarse,
    "lineart_anime": unload_lineart_anime,
    "lineart_anime_denoise": unload_lineart_anime_denoise,
    "inpaint_only+lama": unload_lama_inpaint,
    "anime_face_segment": unload_anime_face_segment,
    "densepose": unload_densepose,
    "densepose_parula": unload_densepose,
    "depth_hand_refiner": g_hand_refiner_model.unload,
    "te_hed": unload_te_hed,
}
preprocessor_aliases = {
    "invert": "invert (from white bg & black line)",
    "lineart_standard": "lineart_standard (from white bg & black line)",
    "lineart": "lineart_realistic",
    "color": "t2ia_color_grid",
    "clip_vision": "t2ia_style_clipvision",
    "pidinet_sketch": "t2ia_sketch_pidi",
    "depth": "depth_midas",
    "normal_map": "normal_midas",
    "hed": "softedge_hed",
    "hed_safe": "softedge_hedsafe",
    "pidinet": "softedge_pidinet",
    "pidinet_safe": "softedge_pidisafe",
    "segmentation": "seg_ufade20k",
    "oneformer_coco": "seg_ofcoco",
    "oneformer_ade20k": "seg_ofade20k",
    "pidinet_scribble": "scribble_pidinet",
    "inpaint": "inpaint_global_harmonious",
    "anime_face_segment": "seg_anime_face",
    "densepose": "densepose (purple bg & purple torso)",
    "densepose_parula": "densepose_parula (black bg & blue torso)",
    "te_hed": "softedge_teed",
}
ui_preprocessor_keys = ['none', preprocessor_aliases['invert']]
ui_preprocessor_keys += sorted([preprocessor_aliases.get(k, k)
                                for k in cn_preprocessor_modules.keys()
                                if preprocessor_aliases.get(k, k) not in ui_preprocessor_keys])
reverse_preprocessor_aliases = {preprocessor_aliases[k]: k for k in preprocessor_aliases.keys()}
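
Because the UI lists display names while the module table is keyed by internal names, lookups have to go through the alias maps. A small illustration (resolve_module_key is a hypothetical name, not part of this file):

def resolve_module_key(ui_name):
    # Map a UI display name back to its internal module key;
    # names without an alias pass through unchanged.
    return reverse_preprocessor_aliases.get(ui_name, ui_name)

assert resolve_module_key("softedge_hed") == "hed"
assert resolve_module_key("canny") == "canny"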


@@ -1,213 +0,0 @@
# This is a Python script that converts all old preprocessors to the new format.
# However, the old preprocessors are not very memory efficient,
# and eventually we should port them all to the new format by hand.
# See forge_preprocessor_normalbae/scripts/preprocessor_normalbae for
# how to better implement preprocessors.
# No new preprocessors should be written in this legacy way.
import json
from legacy_preprocessors.preprocessor_meta import ui_preprocessor_keys, reverse_preprocessor_aliases, preprocessor_aliases
from legacy_preprocessors.preprocessor import model_free_preprocessors, no_control_mode_preprocessors, preprocessor_sliders_config, preprocessor_filters, preprocessor_filters_aliases
cn_preprocessor_modules = '''
"none": lambda x, *args, **kwargs: (x, True),
"canny": canny,
"depth": midas,
"depth_leres": functools.partial(leres, boost=False),
"depth_leres++": functools.partial(leres, boost=True),
"depth_hand_refiner": g_hand_refiner_model.run_model,
"depth_anything": functools.partial(depth_anything, colored=False),
"hed": hed,
"hed_safe": hed_safe,
"mediapipe_face": mediapipe_face,
"mlsd": mlsd,
"normal_map": midas_normal,
"openpose": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=False),
"openpose_hand": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=False),
"openpose_face": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=True),
"openpose_faceonly": functools.partial(g_openpose_model.run_model, include_body=False, include_hand=False, include_face=True),
"openpose_full": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=True),
"dw_openpose_full": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=True, include_face=True, use_dw_pose=True),
"animal_openpose": functools.partial(g_openpose_model.run_model, include_body=True, include_hand=False, include_face=False, use_animal_pose=True),
"clip_vision": functools.partial(clip, config='clip_vitl'),
"revision_clipvision": functools.partial(clip, config='clip_g'),
"revision_ignore_prompt": functools.partial(clip, config='clip_g'),
"ip-adapter_clip_sd15": functools.partial(clip, config='clip_h'),
"ip-adapter_clip_sdxl_plus_vith": functools.partial(clip, config='clip_h'),
"ip-adapter_clip_sdxl": functools.partial(clip, config='clip_g'),
"ip-adapter_face_id": g_insight_face_model.run_model,
"ip-adapter_face_id_plus": face_id_plus,
"instant_id_face_keypoints": functools.partial(g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=True),
"instant_id_face_embedding": functools.partial(g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=False),
"color": color,
"pidinet": pidinet,
"pidinet_safe": pidinet_safe,
"pidinet_sketch": pidinet_ts,
"pidinet_scribble": scribble_pidinet,
"scribble_xdog": scribble_xdog,
"scribble_hed": scribble_hed,
"segmentation": uniformer,
"threshold": threshold,
"depth_zoe": zoe_depth,
"normal_bae": normal_bae,
"oneformer_coco": oneformer_coco,
"oneformer_ade20k": oneformer_ade20k,
"lineart": lineart,
"lineart_coarse": lineart_coarse,
"lineart_anime": lineart_anime,
"lineart_standard": lineart_standard,
"shuffle": shuffle,
"tile_resample": tile_resample,
"invert": invert,
"lineart_anime_denoise": lineart_anime_denoise,
"reference_only": identity,
"reference_adain": identity,
"reference_adain+attn": identity,
"inpaint": identity,
"inpaint_only": identity,
"inpaint_only+lama": lama_inpaint,
"tile_colorfix": identity,
"tile_colorfix+sharp": identity,
"recolor_luminance": recolor_luminance,
"recolor_intensity": recolor_intensity,
"blur_gaussian": blur_gaussian,
"anime_face_segment": anime_face_segment,
"densepose": functools.partial(densepose, cmap="viridis"),
"densepose_parula": functools.partial(densepose, cmap="parula"),
"te_hed":te_hed,
'''
cn_preprocessor_unloadable = '''
"hed": unload_hed,
"fake_scribble": unload_hed,
"mlsd": unload_mlsd,
"clip_vision": functools.partial(unload_clip, config='clip_vitl'),
"revision_clipvision": functools.partial(unload_clip, config='clip_g'),
"revision_ignore_prompt": functools.partial(unload_clip, config='clip_g'),
"ip-adapter_clip_sd15": functools.partial(unload_clip, config='clip_h'),
"ip-adapter_clip_sdxl_plus_vith": functools.partial(unload_clip, config='clip_h'),
"ip-adapter_face_id_plus": functools.partial(unload_clip, config='clip_h'),
"ip-adapter_clip_sdxl": functools.partial(unload_clip, config='clip_g'),
"depth": unload_midas,
"depth_leres": unload_leres,
"depth_anything": unload_depth_anything,
"normal_map": unload_midas,
"pidinet": unload_pidinet,
"openpose": g_openpose_model.unload,
"openpose_hand": g_openpose_model.unload,
"openpose_face": g_openpose_model.unload,
"openpose_full": g_openpose_model.unload,
"dw_openpose_full": g_openpose_model.unload,
"animal_openpose": g_openpose_model.unload,
"segmentation": unload_uniformer,
"depth_zoe": unload_zoe_depth,
"normal_bae": unload_normal_bae,
"oneformer_coco": unload_oneformer_coco,
"oneformer_ade20k": unload_oneformer_ade20k,
"lineart": unload_lineart,
"lineart_coarse": unload_lineart_coarse,
"lineart_anime": unload_lineart_anime,
"lineart_anime_denoise": unload_lineart_anime_denoise,
"inpaint_only+lama": unload_lama_inpaint,
"anime_face_segment": unload_anime_face_segment,
"densepose": unload_densepose,
"densepose_parula": unload_densepose,
"depth_hand_refiner": g_hand_refiner_model.unload,
"te_hed":unload_te_hed,
'''
def compile_first_round(text):
    lines = text.splitlines()
    dd = {}
    for line in lines:
        sp = line.split('":')
        if len(sp) == 2:
            k, v = sp
            k = k.strip(' ",')
            v = v.strip(' ",')
            dd[k] = v
    return dd
cn_preprocessor_modules = compile_first_round(cn_preprocessor_modules)
cn_preprocessor_unloadable = compile_first_round(cn_preprocessor_unloadable)
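# Each registry now maps a bare preprocessor name to the source text of its
# expression, e.g. cn_preprocessor_modules['depth_leres'] ==
# "functools.partial(leres, boost=False)".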
def special_get(d, k, default=None):
    k1 = k
    k2 = preprocessor_aliases.get(k, k)
    k3 = reverse_preprocessor_aliases.get(k, k)
    for pk in [k1, k2, k3]:
        if pk in d:
            return d[pk]
    return default


def special_judge_in(d, k):
    k1 = k
    k2 = preprocessor_aliases.get(k, k)
    k3 = reverse_preprocessor_aliases.get(k, k)
    for pk in [k1, k2, k3]:
        if pk in d:
            return True
    return False
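# Both helpers tolerate either naming convention: they try the key itself,
# its UI display alias, and its internal name before giving up.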
legacy_preprocessors = {}
for name in ui_preprocessor_keys:
    call_function = special_get(cn_preprocessor_modules, name, None)
    assert call_function is not None
    unload_function = special_get(cn_preprocessor_unloadable, name, 'None')
    model_free = special_judge_in(model_free_preprocessors, name)
    no_control_mode = special_judge_in(no_control_mode_preprocessors, name)
    slider_config = special_get(preprocessor_sliders_config, name, [])
    resolution = slider_config[0] if len(slider_config) > 0 else None
    slider_1 = slider_config[1] if len(slider_config) > 1 else None
    slider_2 = slider_config[2] if len(slider_config) > 2 else None
    slider_3 = slider_config[3] if len(slider_config) > 3 else None
    legacy_preprocessors[name] = dict(
        name=name,
        call_function='***' + call_function + '***',
        unload_function='***' + unload_function + '***',
        managed_model='***None***',
        model_free=model_free,
        no_control_mode=no_control_mode,
        resolution=resolution,
        slider_1=slider_1,
        slider_2=slider_2,
        slider_3=slider_3,
        priority=0,
        tags=[]
    )
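# The '***' markers tag values that must appear in the generated file as raw
# Python expressions; json.dumps would quote them, so the replace() chain
# below strips the markers together with the surrounding quotes.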
for tag, best in preprocessor_filters.items():
    bp = special_get(legacy_preprocessors, best, None)
    if bp is not None:
        bp['priority'] = 100

for tag, best in preprocessor_filters.items():
    marks = [tag.lower()] + preprocessor_filters_aliases.get(tag.lower(), [])
    for k, p in legacy_preprocessors.items():
        if any(x.lower() in k.lower() for x in marks):
            p['tags'] = [tag]
compiled_filename = __file__.replace('compiler', 'compiled')
with open(compiled_filename, 'wt') as fp:
    result = (json.dumps(legacy_preprocessors, indent=4)
              .replace('null', 'None').replace('false', 'False').replace('true', 'True')
              .replace('***"', '').replace('"***', '').replace('\\"', '"')
              .replace('"Balanced"', 'Balanced'))
    fp.write('import functools\nfrom legacy_preprocessors.preprocessor import *\n\n\nlegacy_preprocessors = ' + result)
print('ok')
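
For orientation, the generated preprocessor_compiled module ends up holding plain Python literals. Reconstructed from the transform above, an entry comes out roughly like this (illustrative, not copied from the repository):

legacy_preprocessors = {
    "canny": {
        "name": "canny",
        "call_function": canny,
        "unload_function": None,
        "managed_model": None,
        "model_free": False,
        "no_control_mode": False,
        "resolution": {"name": "Preprocessor Resolution", "value": 512, "min": 64, "max": 2048},
        "slider_1": {"name": "Canny Low Threshold", "value": 100, "min": 1, "max": 255},
        "slider_2": {"name": "Canny High Threshold", "value": 200, "min": 1, "max": 255},
        "slider_3": None,
        "priority": 100,
        "tags": ["Canny"],
    },
    # ... one entry per name in ui_preprocessor_keys
}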


@@ -7,3 +7,6 @@ from modules_forge.shared import Preprocessor, PreprocessorParameter, preprocess
# how to better implement preprocessors.
# No new preprocessors should be written in this legacy way.
from scripts.preprocessor_compiled import legacy_preprocessors
a = 0