From c0314ba3255c33736419d31723d9158097ac6334 Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Sat, 14 Jun 2025 12:24:00 -0600 Subject: [PATCH 01/13] Fixed some issues with training mean flow algo. Still testing WIP --- extensions_built_in/sd_trainer/SDTrainer.py | 5 +++-- toolkit/models/flux.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/extensions_built_in/sd_trainer/SDTrainer.py b/extensions_built_in/sd_trainer/SDTrainer.py index 6e1daf39..f3813133 100644 --- a/extensions_built_in/sd_trainer/SDTrainer.py +++ b/extensions_built_in/sd_trainer/SDTrainer.py @@ -135,9 +135,9 @@ class SDTrainer(BaseSDTrainProcess): def hook_before_train_loop(self): super().hook_before_train_loop() - if self.train_config.timestep_type == "mean_flow": + if self.train_config.loss_type == "mean_flow": # todo handle non flux models - convert_flux_to_mean_flow(self.sd.transformer) + convert_flux_to_mean_flow(self.sd.unet) if self.train_config.do_prior_divergence: self.do_prior_prediction = True @@ -811,6 +811,7 @@ class SDTrainer(BaseSDTrainProcess): base_eps, base_eps + jitter ) + # eps = (t_frac - r_frac) / 2 # eps = 1e-3 # primary prediction (needs grad) diff --git a/toolkit/models/flux.py b/toolkit/models/flux.py index 42194179..5d3064f4 100644 --- a/toolkit/models/flux.py +++ b/toolkit/models/flux.py @@ -179,11 +179,15 @@ def add_model_gpu_splitter_to_flux( def mean_flow_time_text_embed_forward(self:CombinedTimestepTextProjEmbeddings, timestep, pooled_projection): # make zero timestep ending if none is passed - if timestep.shape[0] == pooled_projection.shape[0] // 2: - timestep = torch.cat([timestep, timestep], dim=0) # timestep - 0 (final timestep) == same as start timestep + if timestep.shape[0] == pooled_projection.shape[0]: + timestep = torch.cat([timestep, torch.zeros_like(timestep)], dim=0) # timestep - 0 (final timestep) == same as start timestep timesteps_proj = self.time_proj(timestep) - timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) + timesteps_emb_combo = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) + + timesteps_emb_start, timesteps_emb_end = timesteps_emb_combo.chunk(2, dim=0) + + timesteps_emb = timesteps_emb_start + timesteps_emb_end pooled_projections = self.text_embedder(pooled_projection) @@ -193,8 +197,8 @@ def mean_flow_time_text_embed_forward(self:CombinedTimestepTextProjEmbeddings, t def mean_flow_time_text_guidance_embed_forward(self: CombinedTimestepGuidanceTextProjEmbeddings, timestep, guidance, pooled_projection): # make zero timestep ending if none is passed - if timestep.shape[0] == pooled_projection.shape[0] // 2: - timestep = torch.cat([timestep, timestep], dim=0) + if timestep.shape[0] == pooled_projection.shape[0]: + timestep = torch.cat([timestep, torch.zeros_like(timestep)], dim=0) # timestep - 0 (final timestep) == same as start timestep timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) From 1c2b7298dd29a70294ca28b0eda4c241af6b0ad9 Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Mon, 16 Jun 2025 07:17:35 -0600 Subject: [PATCH 02/13] More work on mean flow loss. Moved it to an adapter. Still not functioning properly though. 
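For context, the objective follows Alg. 1 / Eq. (6) of the Mean Flows paper: the network predicts the average velocity u(z_t, r, t) and is regressed onto v - (t - r) * du/dt, where v = noise - latents is the instantaneous velocity of the linear path, and the total derivative du/dt is approximated with a finite difference instead of torch.func.jvp, since double backprop through flash attention is not supported. A minimal sketch of that loss, assuming a hypothetical model(z, r, t) that predicts the mean velocity field (the names, fixed epsilon, and clamping are illustrative, not the exact trainer code):

```python
import torch
import torch.nn.functional as F

def mean_flow_fd_loss(model, latents, noise, t, r, eps=1e-3):
    # linear flow-matching path: z_t = (1 - t) * x + t * noise, so dz/dt = noise - x
    t_b = t[:, None, None, None]
    r_b = r[:, None, None, None]
    z_t = (1.0 - t_b) * latents + t_b * noise
    v = noise - latents  # instantaneous velocity, constant along the path

    # primary prediction u(z_t, r, t); gradients flow only through this call
    u = model(z_t, r, t)

    with torch.no_grad():
        # finite-difference JVP along (dz, dr, dt) = (v, 0, 1):
        # du/dt ~= (u(z_t + eps * v, r, t + eps) - u(z_t, r, t)) / eps
        u_shift = model(z_t + eps * v, r, torch.clamp(t + eps, max=1.0))
        du_dt = (u_shift - u.detach()) / eps
        # regression target from Eq. (6): u_tgt = v - (t - r) * du/dt
        u_tgt = v - (t_b - r_b) * du_dt

    return F.mse_loss(u.float(), u_tgt.float())
```

The adapter below conditions the model on both timesteps by concatenating the start and end timestep embeddings and fusing them with a linear layer initialized as the identity on the t half and zero on the r half, so an untrained adapter reproduces the base model's original conditioning.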
--- extensions_built_in/sd_trainer/SDTrainer.py | 101 ------- jobs/process/BaseSDTrainProcess.py | 6 +- toolkit/custom_adapter.py | 40 ++- toolkit/models/control_lora_adapter.py | 2 +- toolkit/models/flux.py | 57 ---- toolkit/models/mean_flow_adapter.py | 282 ++++++++++++++++++++ 6 files changed, 323 insertions(+), 165 deletions(-) create mode 100644 toolkit/models/mean_flow_adapter.py diff --git a/extensions_built_in/sd_trainer/SDTrainer.py b/extensions_built_in/sd_trainer/SDTrainer.py index f3813133..a0de8cd1 100644 --- a/extensions_built_in/sd_trainer/SDTrainer.py +++ b/extensions_built_in/sd_trainer/SDTrainer.py @@ -36,7 +36,6 @@ from toolkit.train_tools import precondition_model_outputs_flow_match from toolkit.models.diffusion_feature_extraction import DiffusionFeatureExtractor, load_dfe from toolkit.util.wavelet_loss import wavelet_loss import torch.nn.functional as F -from toolkit.models.flux import convert_flux_to_mean_flow def flush(): @@ -135,9 +134,6 @@ class SDTrainer(BaseSDTrainProcess): def hook_before_train_loop(self): super().hook_before_train_loop() - if self.train_config.loss_type == "mean_flow": - # todo handle non flux models - convert_flux_to_mean_flow(self.sd.unet) if self.train_config.do_prior_divergence: self.do_prior_prediction = True @@ -634,102 +630,6 @@ class SDTrainer(BaseSDTrainProcess): return loss - # ------------------------------------------------------------------ - # Mean-Flow loss (Geng et al., “Mean Flows for One-step Generative - # Modelling”, 2025 – see Alg. 1 + Eq. (6) of the paper) - # This version avoids jvp / double-back-prop issues with Flash-Attention - # adapted from the work of lodestonerock - # ------------------------------------------------------------------ - def get_mean_flow_loss_wip( - self, - noisy_latents: torch.Tensor, - conditional_embeds: PromptEmbeds, - match_adapter_assist: bool, - network_weight_list: list, - timesteps: torch.Tensor, - pred_kwargs: dict, - batch: 'DataLoaderBatchDTO', - noise: torch.Tensor, - unconditional_embeds: Optional[PromptEmbeds] = None, - **kwargs - ): - batch_latents = batch.latents.to(self.device_torch, dtype=get_torch_dtype(self.train_config.dtype)) - - - time_end = timesteps.float() / 1000 - # for timestep_r, we need values from timestep_end to 0.0 randomly - time_origin = torch.rand_like(time_end, device=self.device_torch, dtype=time_end.dtype) * time_end - - # time_origin = torch.zeros_like(time_end, device=self.device_torch, dtype=time_end.dtype) - # Compute noised data points - # lerp_vector = noisy_latents - # compute instantaneous vector - instantaneous_vector = noise - batch_latents - - # finite difference method - epsilon_fd = 1e-3 - jitter_std = 1e-4 - epsilon_jittered = epsilon_fd + torch.randn(1, device=batch_latents.device) * jitter_std - epsilon_jittered = torch.clamp(epsilon_jittered, min=1e-4) - - # f(x + epsilon * v) for the primal (we backprop through here) - # mean_vec_val_pred = self.forward(lerp_vector, class_label) - mean_vec_val_pred = self.predict_noise( - noisy_latents=noisy_latents, - timesteps=torch.cat([time_end, time_origin], dim=0) * 1000, - conditional_embeds=conditional_embeds, - unconditional_embeds=unconditional_embeds, - batch=batch, - **pred_kwargs - ) - - with torch.no_grad(): - perturbed_time_end = torch.clamp(time_end + epsilon_jittered, 0.0, 1.0) - # intermediate vector to compute tangent approximation f(x + epsilon * v) ! NO GRAD HERE! 
- perturbed_lerp_vector = noisy_latents + epsilon_jittered * instantaneous_vector - # f_x_plus_eps_v = self.forward(perturbed_lerp_vector, class_label) - f_x_plus_eps_v = self.predict_noise( - noisy_latents=perturbed_lerp_vector, - timesteps=torch.cat([perturbed_time_end, time_origin], dim=0) * 1000, - conditional_embeds=conditional_embeds, - unconditional_embeds=unconditional_embeds, - batch=batch, - **pred_kwargs - ) - - # JVP approximation: (f(x + epsilon * v) - f(x)) / epsilon - mean_vec_grad_fd = (f_x_plus_eps_v - mean_vec_val_pred) / epsilon_jittered - mean_vec_grad = mean_vec_grad_fd - - - # calculate the regression target the mean vector - time_difference_broadcast = (time_end - time_origin)[:, None, None, None] - mean_vec_target = instantaneous_vector - time_difference_broadcast * mean_vec_grad - - # 5) MSE loss - loss = torch.nn.functional.mse_loss( - mean_vec_val_pred.float(), - mean_vec_target.float(), - reduction='none' - ) - with torch.no_grad(): - pure_loss = loss.mean().detach() - # add grad to pure_loss so it can be backwards without issues - pure_loss.requires_grad_(True) - # normalize the loss per batch element to 1.0 - # this method has large loss swings that can hurt the model. This method will prevent that - with torch.no_grad(): - loss_mean = loss.mean([1, 2, 3], keepdim=True) - loss = loss / loss_mean - loss = loss.mean() - - # backward the pure loss for logging - self.accelerator.backward(loss) - - # return the real loss for logging - return pure_loss - - # ------------------------------------------------------------------ # Mean-Flow loss (Geng et al., “Mean Flows for One-step Generative # Modelling”, 2025 – see Alg. 1 + Eq. (6) of the paper) @@ -811,7 +711,6 @@ class SDTrainer(BaseSDTrainProcess): base_eps, base_eps + jitter ) - # eps = (t_frac - r_frac) / 2 # eps = 1e-3 # primary prediction (needs grad) diff --git a/jobs/process/BaseSDTrainProcess.py b/jobs/process/BaseSDTrainProcess.py index 064e89fc..1e0393c0 100644 --- a/jobs/process/BaseSDTrainProcess.py +++ b/jobs/process/BaseSDTrainProcess.py @@ -575,10 +575,8 @@ class BaseSDTrainProcess(BaseTrainProcess): direct_save = False if self.adapter_config.train_only_image_encoder: direct_save = True - if self.adapter_config.type == 'redux': - direct_save = True - if self.adapter_config.type in ['control_lora', 'subpixel', 'i2v']: - direct_save = True + elif isinstance(self.adapter, CustomAdapter): + direct_save = self.adapter.do_direct_save save_ip_adapter_from_diffusers( state_dict, output_file=file_path, diff --git a/toolkit/custom_adapter.py b/toolkit/custom_adapter.py index a9ec5d10..cc58c5b5 100644 --- a/toolkit/custom_adapter.py +++ b/toolkit/custom_adapter.py @@ -11,6 +11,7 @@ from toolkit.data_transfer_object.data_loader import DataLoaderBatchDTO from toolkit.models.clip_fusion import CLIPFusionModule from toolkit.models.clip_pre_processor import CLIPImagePreProcessor from toolkit.models.control_lora_adapter import ControlLoraAdapter +from toolkit.models.mean_flow_adapter import MeanFlowAdapter from toolkit.models.i2v_adapter import I2VAdapter from toolkit.models.subpixel_adapter import SubpixelAdapter from toolkit.models.ilora import InstantLoRAModule @@ -98,6 +99,7 @@ class CustomAdapter(torch.nn.Module): self.single_value_adapter: SingleValueAdapter = None self.redux_adapter: ReduxImageEncoder = None self.control_lora: ControlLoraAdapter = None + self.mean_flow_adapter: MeanFlowAdapter = None self.subpixel_adapter: SubpixelAdapter = None self.i2v_adapter: I2VAdapter = None @@ -125,6 +127,16 @@ class 
CustomAdapter(torch.nn.Module): dtype=self.sd_ref().dtype, ) self.load_state_dict(loaded_state_dict, strict=False) + + @property + def do_direct_save(self): + # some adapters save their weights directly, others like ip adapters split the state dict + if self.config.train_only_image_encoder: + return True + if self.config.type in ['control_lora', 'subpixel', 'i2v', 'redux', 'mean_flow']: + return True + return False + def setup_adapter(self): torch_dtype = get_torch_dtype(self.sd_ref().dtype) @@ -245,6 +257,13 @@ class CustomAdapter(torch.nn.Module): elif self.adapter_type == 'redux': vision_hidden_size = self.vision_encoder.config.hidden_size self.redux_adapter = ReduxImageEncoder(vision_hidden_size, 4096, self.device, torch_dtype) + elif self.adapter_type == 'mean_flow': + self.mean_flow_adapter = MeanFlowAdapter( + self, + sd=self.sd_ref(), + config=self.config, + train_config=self.train_config + ) elif self.adapter_type == 'control_lora': self.control_lora = ControlLoraAdapter( self, @@ -309,7 +328,7 @@ class CustomAdapter(torch.nn.Module): def setup_clip(self): adapter_config = self.config sd = self.sd_ref() - if self.config.type in ["text_encoder", "llm_adapter", "single_value", "control_lora", "subpixel"]: + if self.config.type in ["text_encoder", "llm_adapter", "single_value", "control_lora", "subpixel", "mean_flow"]: return if self.config.type == 'photo_maker': try: @@ -528,6 +547,14 @@ class CustomAdapter(torch.nn.Module): new_dict[k + '.' + k2] = v2 self.control_lora.load_weights(new_dict, strict=strict) + if self.adapter_type == 'mean_flow': + # state dict is separated. so recombine it + new_dict = {} + for k, v in state_dict.items(): + for k2, v2 in v.items(): + new_dict[k + '.' + k2] = v2 + self.mean_flow_adapter.load_weights(new_dict, strict=strict) + if self.adapter_type == 'i2v': # state dict is separated. 
so recombine it new_dict = {} @@ -599,6 +626,11 @@ class CustomAdapter(torch.nn.Module): for k, v in d.items(): state_dict[k] = v return state_dict + elif self.adapter_type == 'mean_flow': + d = self.mean_flow_adapter.get_state_dict() + for k, v in d.items(): + state_dict[k] = v + return state_dict elif self.adapter_type == 'i2v': d = self.i2v_adapter.get_state_dict() for k, v in d.items(): @@ -757,7 +789,7 @@ class CustomAdapter(torch.nn.Module): prompt: Union[List[str], str], is_unconditional: bool = False, ): - if self.adapter_type in ['clip_fusion', 'ilora', 'vision_direct', 'redux', 'control_lora', 'subpixel', 'i2v']: + if self.adapter_type in ['clip_fusion', 'ilora', 'vision_direct', 'redux', 'control_lora', 'subpixel', 'i2v', 'mean_flow']: return prompt elif self.adapter_type == 'text_encoder': # todo allow for training @@ -1319,6 +1351,10 @@ class CustomAdapter(torch.nn.Module): param_list = self.control_lora.get_params() for param in param_list: yield param + elif self.config.type == 'mean_flow': + param_list = self.mean_flow_adapter.get_params() + for param in param_list: + yield param elif self.config.type == 'i2v': param_list = self.i2v_adapter.get_params() for param in param_list: diff --git a/toolkit/models/control_lora_adapter.py b/toolkit/models/control_lora_adapter.py index 3588302d..38147ea9 100644 --- a/toolkit/models/control_lora_adapter.py +++ b/toolkit/models/control_lora_adapter.py @@ -135,7 +135,7 @@ class ControlLoraAdapter(torch.nn.Module): network_kwargs = {} if self.network_config.network_kwargs is None else self.network_config.network_kwargs if hasattr(sd, 'target_lora_modules'): - network_kwargs['target_lin_modules'] = self.sd.target_lora_modules + network_kwargs['target_lin_modules'] = sd.target_lora_modules if 'ignore_if_contains' not in network_kwargs: network_kwargs['ignore_if_contains'] = [] diff --git a/toolkit/models/flux.py b/toolkit/models/flux.py index 5d3064f4..0241ce2f 100644 --- a/toolkit/models/flux.py +++ b/toolkit/models/flux.py @@ -176,60 +176,3 @@ def add_model_gpu_splitter_to_flux( transformer._pre_gpu_split_to = transformer.to transformer.to = partial(new_device_to, transformer) - -def mean_flow_time_text_embed_forward(self:CombinedTimestepTextProjEmbeddings, timestep, pooled_projection): - # make zero timestep ending if none is passed - if timestep.shape[0] == pooled_projection.shape[0]: - timestep = torch.cat([timestep, torch.zeros_like(timestep)], dim=0) # timestep - 0 (final timestep) == same as start timestep - - timesteps_proj = self.time_proj(timestep) - timesteps_emb_combo = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) - - timesteps_emb_start, timesteps_emb_end = timesteps_emb_combo.chunk(2, dim=0) - - timesteps_emb = timesteps_emb_start + timesteps_emb_end - - pooled_projections = self.text_embedder(pooled_projection) - - conditioning = timesteps_emb + pooled_projections - - return conditioning - -def mean_flow_time_text_guidance_embed_forward(self: CombinedTimestepGuidanceTextProjEmbeddings, timestep, guidance, pooled_projection): - # make zero timestep ending if none is passed - if timestep.shape[0] == pooled_projection.shape[0]: - timestep = torch.cat([timestep, torch.zeros_like(timestep)], dim=0) # timestep - 0 (final timestep) == same as start timestep - timesteps_proj = self.time_proj(timestep) - timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) - - guidance_proj = self.time_proj(guidance) - guidance_emb = 
self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) - - timesteps_emb_start, timesteps_emb_end = timesteps_emb.chunk(2, dim=0) - - time_guidance_emb = timesteps_emb_start + timesteps_emb_end + guidance_emb - - pooled_projections = self.text_embedder(pooled_projection) - conditioning = time_guidance_emb + pooled_projections - - return conditioning - - -def convert_flux_to_mean_flow( - transformer: FluxTransformer2DModel, -): - if isinstance(transformer.time_text_embed, CombinedTimestepTextProjEmbeddings): - transformer.time_text_embed.forward = partial( - mean_flow_time_text_embed_forward, transformer.time_text_embed - ) - elif isinstance(transformer.time_text_embed, CombinedTimestepGuidanceTextProjEmbeddings): - transformer.time_text_embed.forward = partial( - mean_flow_time_text_guidance_embed_forward, transformer.time_text_embed - ) - else: - raise ValueError( - "Unsupported time_text_embed type: {}".format( - type(transformer.time_text_embed) - ) - ) - \ No newline at end of file diff --git a/toolkit/models/mean_flow_adapter.py b/toolkit/models/mean_flow_adapter.py new file mode 100644 index 00000000..1c5a6c15 --- /dev/null +++ b/toolkit/models/mean_flow_adapter.py @@ -0,0 +1,282 @@ +import inspect +import weakref +import torch +from typing import TYPE_CHECKING +from toolkit.lora_special import LoRASpecialNetwork +from diffusers import FluxTransformer2DModel +from diffusers.models.embeddings import ( + CombinedTimestepTextProjEmbeddings, + CombinedTimestepGuidanceTextProjEmbeddings, +) +from functools import partial + + +if TYPE_CHECKING: + from toolkit.stable_diffusion_model import StableDiffusion + from toolkit.config_modules import AdapterConfig, TrainConfig, ModelConfig + from toolkit.custom_adapter import CustomAdapter + + +def mean_flow_time_text_embed_forward( + self: CombinedTimestepTextProjEmbeddings, timestep, pooled_projection +): + mean_flow_adapter: "MeanFlowAdapter" = self.mean_flow_adapter_ref() + # make zero timestep ending if none is passed + if mean_flow_adapter.is_active and timestep.shape[0] == pooled_projection.shape[0]: + timestep = torch.cat( + [timestep, torch.zeros_like(timestep)], dim=0 + ) # timestep - 0 (final timestep) == same as start timestep + + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder( + timesteps_proj.to(dtype=pooled_projection.dtype) + ) # (N, D) + + # mean flow stuff + if mean_flow_adapter.is_active: + # todo make sure that timesteps is batched correctly, I think diffusers expects non batched timesteps + orig_dtype = timesteps_emb.dtype + timesteps_emb = timesteps_emb.to(torch.float32) + timesteps_emb_start, timesteps_emb_end = timesteps_emb.chunk(2, dim=0) + timesteps_emb = mean_flow_adapter.mean_flow_timestep_embedder( + torch.cat([timesteps_emb_start, timesteps_emb_end], dim=-1) + ) + timesteps_emb = timesteps_emb.to(orig_dtype) + + pooled_projections = self.text_embedder(pooled_projection) + + conditioning = timesteps_emb + pooled_projections + + return conditioning + + +def mean_flow_time_text_guidance_embed_forward( + self: CombinedTimestepGuidanceTextProjEmbeddings, + timestep, + guidance, + pooled_projection, +): + mean_flow_adapter: "MeanFlowAdapter" = self.mean_flow_adapter_ref() + # make zero timestep ending if none is passed + if mean_flow_adapter.is_active and timestep.shape[0] == pooled_projection.shape[0]: + timestep = torch.cat( + [timestep, torch.zeros_like(timestep)], dim=0 + ) # timestep - 0 (final timestep) == same as start timestep + timesteps_proj = 
self.time_proj(timestep) + timesteps_emb = self.timestep_embedder( + timesteps_proj.to(dtype=pooled_projection.dtype) + ) # (N, D) + + guidance_proj = self.time_proj(guidance) + guidance_emb = self.guidance_embedder( + guidance_proj.to(dtype=pooled_projection.dtype) + ) # (N, D) + + # mean flow stuff + if mean_flow_adapter.is_active: + # todo make sure that timesteps is batched correctly, I think diffusers expects non batched timesteps + orig_dtype = timesteps_emb.dtype + timesteps_emb = timesteps_emb.to(torch.float32) + timesteps_emb_start, timesteps_emb_end = timesteps_emb.chunk(2, dim=0) + timesteps_emb = mean_flow_adapter.mean_flow_timestep_embedder( + torch.cat([timesteps_emb_start, timesteps_emb_end], dim=-1) + ) + timesteps_emb = timesteps_emb.to(orig_dtype) + + time_guidance_emb = timesteps_emb + guidance_emb + + pooled_projections = self.text_embedder(pooled_projection) + conditioning = time_guidance_emb + pooled_projections + + return conditioning + + +def convert_flux_to_mean_flow( + transformer: FluxTransformer2DModel, +): + if isinstance(transformer.time_text_embed, CombinedTimestepTextProjEmbeddings): + transformer.time_text_embed.forward = partial( + mean_flow_time_text_embed_forward, transformer.time_text_embed + ) + elif isinstance( + transformer.time_text_embed, CombinedTimestepGuidanceTextProjEmbeddings + ): + transformer.time_text_embed.forward = partial( + mean_flow_time_text_guidance_embed_forward, transformer.time_text_embed + ) + else: + raise ValueError( + "Unsupported time_text_embed type: {}".format( + type(transformer.time_text_embed) + ) + ) + + +class MeanFlowAdapter(torch.nn.Module): + def __init__( + self, + adapter: "CustomAdapter", + sd: "StableDiffusion", + config: "AdapterConfig", + train_config: "TrainConfig", + ): + super().__init__() + self.adapter_ref: weakref.ref = weakref.ref(adapter) + self.sd_ref = weakref.ref(sd) + self.model_config: ModelConfig = sd.model_config + self.network_config = config.lora_config + self.train_config = train_config + self.device_torch = sd.device_torch + self.lora = None + + if self.network_config is not None: + network_kwargs = ( + {} + if self.network_config.network_kwargs is None + else self.network_config.network_kwargs + ) + if hasattr(sd, "target_lora_modules"): + network_kwargs["target_lin_modules"] = sd.target_lora_modules + + if "ignore_if_contains" not in network_kwargs: + network_kwargs["ignore_if_contains"] = [] + + self.lora = LoRASpecialNetwork( + text_encoder=sd.text_encoder, + unet=sd.unet, + lora_dim=self.network_config.linear, + multiplier=1.0, + alpha=self.network_config.linear_alpha, + train_unet=self.train_config.train_unet, + train_text_encoder=self.train_config.train_text_encoder, + conv_lora_dim=self.network_config.conv, + conv_alpha=self.network_config.conv_alpha, + is_sdxl=self.model_config.is_xl or self.model_config.is_ssd, + is_v2=self.model_config.is_v2, + is_v3=self.model_config.is_v3, + is_pixart=self.model_config.is_pixart, + is_auraflow=self.model_config.is_auraflow, + is_flux=self.model_config.is_flux, + is_lumina2=self.model_config.is_lumina2, + is_ssd=self.model_config.is_ssd, + is_vega=self.model_config.is_vega, + dropout=self.network_config.dropout, + use_text_encoder_1=self.model_config.use_text_encoder_1, + use_text_encoder_2=self.model_config.use_text_encoder_2, + use_bias=False, + is_lorm=False, + network_config=self.network_config, + network_type=self.network_config.type, + transformer_only=self.network_config.transformer_only, + is_transformer=sd.is_transformer, + 
base_model=sd, + **network_kwargs, + ) + self.lora.force_to(self.device_torch, dtype=torch.float32) + self.lora._update_torch_multiplier() + self.lora.apply_to( + sd.text_encoder, + sd.unet, + self.train_config.train_text_encoder, + self.train_config.train_unet, + ) + self.lora.can_merge_in = False + self.lora.prepare_grad_etc(sd.text_encoder, sd.unet) + if self.train_config.gradient_checkpointing: + self.lora.enable_gradient_checkpointing() + + emb_dim = None + if self.model_config.arch in ["flux", "flex2", "flex2"]: + transformer: FluxTransformer2DModel = sd.unet + emb_dim = ( + transformer.config.num_attention_heads + * transformer.config.attention_head_dim + ) + convert_flux_to_mean_flow(transformer) + else: + raise ValueError(f"Unsupported architecture: {self.model_config.arch}") + + self.mean_flow_timestep_embedder = torch.nn.Linear( + emb_dim * 2, + emb_dim, + ) + + # make the model function as before adding this adapter by initializing the weights + with torch.no_grad(): + self.mean_flow_timestep_embedder.weight.zero_() + self.mean_flow_timestep_embedder.weight[:, :emb_dim] = torch.eye(emb_dim) + self.mean_flow_timestep_embedder.bias.zero_() + + self.mean_flow_timestep_embedder.to(self.device_torch) + + # add our adapter as a weak ref + if self.model_config.arch in ["flux", "flex2", "flex2"]: + sd.unet.time_text_embed.mean_flow_adapter_ref = weakref.ref(self) + + def get_params(self): + if self.lora is not None: + config = { + "text_encoder_lr": self.train_config.lr, + "unet_lr": self.train_config.lr, + } + sig = inspect.signature(self.lora.prepare_optimizer_params) + if "default_lr" in sig.parameters: + config["default_lr"] = self.train_config.lr + if "learning_rate" in sig.parameters: + config["learning_rate"] = self.train_config.lr + params_net = self.lora.prepare_optimizer_params(**config) + + # we want only tensors here + params = [] + for p in params_net: + if isinstance(p, dict): + params += p["params"] + elif isinstance(p, torch.Tensor): + params.append(p) + elif isinstance(p, list): + params += p + else: + params = [] + + # make sure the embedder is float32 + self.mean_flow_timestep_embedder.to(torch.float32) + self.mean_flow_timestep_embedder.requires_grad_(True) + self.mean_flow_timestep_embedder.train() + + params += list(self.mean_flow_timestep_embedder.parameters()) + + # we need to be able to yield from the list like yield from params + + return params + + def load_weights(self, state_dict, strict=True): + lora_sd = {} + mean_flow_embedder_sd = {} + for key, value in state_dict.items(): + if "mean_flow_timestep_embedder" in key: + new_key = key.replace("transformer.mean_flow_timestep_embedder.", "") + mean_flow_embedder_sd[new_key] = value + else: + lora_sd[key] = value + + # todo process state dict before loading for models that need it + if self.lora is not None: + self.lora.load_weights(lora_sd) + self.mean_flow_timestep_embedder.load_state_dict( + mean_flow_embedder_sd, strict=False + ) + + def get_state_dict(self): + if self.lora is not None: + lora_sd = self.lora.get_state_dict(dtype=torch.float32) + else: + lora_sd = {} + # todo make sure we match loras elsewhere. 
+ mean_flow_embedder_sd = self.mean_flow_timestep_embedder.state_dict() + for key, value in mean_flow_embedder_sd.items(): + lora_sd[f"transformer.mean_flow_timestep_embedder.{key}"] = value + return lora_sd + + @property + def is_active(self): + return self.adapter_ref().is_active From 11f2eee53aa9b0b8c0e89543ced9ca048683eb1b Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Mon, 16 Jun 2025 07:18:43 -0600 Subject: [PATCH 03/13] Hide control images from ui image viewer --- ui/src/app/api/datasets/listImages/route.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/app/api/datasets/listImages/route.ts b/ui/src/app/api/datasets/listImages/route.ts index 55a11057..06dca84a 100644 --- a/ui/src/app/api/datasets/listImages/route.ts +++ b/ui/src/app/api/datasets/listImages/route.ts @@ -45,7 +45,7 @@ function findImagesRecursively(dir: string): string[] { const itemPath = path.join(dir, item); const stat = fs.statSync(itemPath); - if (stat.isDirectory()) { + if (stat.isDirectory() && item !== '_controls' && !item.startsWith('.')) { // If it's a directory, recursively search it results = results.concat(findImagesRecursively(itemPath)); } else { From 1cc663a66466040b4822c4d896484411df5d1b53 Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Tue, 17 Jun 2025 07:37:41 -0600 Subject: [PATCH 04/13] Performance optimizations for preprocessing the batch --- README.md | 10 ++ extensions_built_in/sd_trainer/SDTrainer.py | 138 ++++++++++---------- jobs/process/BaseSDTrainProcess.py | 62 ++++----- toolkit/stable_diffusion_model.py | 9 ++ 4 files changed, 120 insertions(+), 99 deletions(-) diff --git a/README.md b/README.md index fa660c6a..5b4ae070 100644 --- a/README.md +++ b/README.md @@ -417,6 +417,16 @@ Everything else should work the same including layer targeting. 
## Updates +### June 17, 2025 +- Performance optimizations for batch preparation + +### June 16, 2025 +- Hide control images in the UI when viewing datasets +- WIP on mean flow loss + +### June 12, 2025 +- Fixed issue that resulted in blank captions in the dataloader + ### June 10, 2025 - Decided to keep track of updates in the readme - Added support for SDXL in the UI diff --git a/extensions_built_in/sd_trainer/SDTrainer.py b/extensions_built_in/sd_trainer/SDTrainer.py index a0de8cd1..479b4141 100644 --- a/extensions_built_in/sd_trainer/SDTrainer.py +++ b/extensions_built_in/sd_trainer/SDTrainer.py @@ -1009,76 +1009,76 @@ class SDTrainer(BaseSDTrainProcess): return loss def train_single_accumulation(self, batch: DataLoaderBatchDTO): - self.timer.start('preprocess_batch') - if isinstance(self.adapter, CustomAdapter): - batch = self.adapter.edit_batch_raw(batch) - batch = self.preprocess_batch(batch) - if isinstance(self.adapter, CustomAdapter): - batch = self.adapter.edit_batch_processed(batch) - dtype = get_torch_dtype(self.train_config.dtype) - # sanity check - if self.sd.vae.dtype != self.sd.vae_torch_dtype: - self.sd.vae = self.sd.vae.to(self.sd.vae_torch_dtype) - if isinstance(self.sd.text_encoder, list): - for encoder in self.sd.text_encoder: - if encoder.dtype != self.sd.te_torch_dtype: - encoder.to(self.sd.te_torch_dtype) - else: - if self.sd.text_encoder.dtype != self.sd.te_torch_dtype: - self.sd.text_encoder.to(self.sd.te_torch_dtype) - - noisy_latents, noise, timesteps, conditioned_prompts, imgs = self.process_general_training_batch(batch) - if self.train_config.do_cfg or self.train_config.do_random_cfg: - # pick random negative prompts - if self.negative_prompt_pool is not None: - negative_prompts = [] - for i in range(noisy_latents.shape[0]): - num_neg = random.randint(1, self.train_config.max_negative_prompts) - this_neg_prompts = [random.choice(self.negative_prompt_pool) for _ in range(num_neg)] - this_neg_prompt = ', '.join(this_neg_prompts) - negative_prompts.append(this_neg_prompt) - self.batch_negative_prompt = negative_prompts - else: - self.batch_negative_prompt = ['' for _ in range(batch.latents.shape[0])] - - if self.adapter and isinstance(self.adapter, CustomAdapter): - # condition the prompt - # todo handle more than one adapter image - conditioned_prompts = self.adapter.condition_prompt(conditioned_prompts) - - network_weight_list = batch.get_network_weight_list() - if self.train_config.single_item_batching: - network_weight_list = network_weight_list + network_weight_list - - has_adapter_img = batch.control_tensor is not None - has_clip_image = batch.clip_image_tensor is not None - has_clip_image_embeds = batch.clip_image_embeds is not None - # force it to be true if doing regs as we handle those differently - if any([batch.file_items[idx].is_reg for idx in range(len(batch.file_items))]): - has_clip_image = True - if self._clip_image_embeds_unconditional is not None: - has_clip_image_embeds = True # we are caching embeds, handle that differently - has_clip_image = False - - if self.adapter is not None and isinstance(self.adapter, IPAdapter) and not has_clip_image and has_adapter_img: - raise ValueError( - "IPAdapter control image is now 'clip_image_path' instead of 'control_path'. 
Please update your dataset config ") - - match_adapter_assist = False - - # check if we are matching the adapter assistant - if self.assistant_adapter: - if self.train_config.match_adapter_chance == 1.0: - match_adapter_assist = True - elif self.train_config.match_adapter_chance > 0.0: - match_adapter_assist = torch.rand( - (1,), device=self.device_torch, dtype=dtype - ) < self.train_config.match_adapter_chance - - self.timer.stop('preprocess_batch') - - is_reg = False with torch.no_grad(): + self.timer.start('preprocess_batch') + if isinstance(self.adapter, CustomAdapter): + batch = self.adapter.edit_batch_raw(batch) + batch = self.preprocess_batch(batch) + if isinstance(self.adapter, CustomAdapter): + batch = self.adapter.edit_batch_processed(batch) + dtype = get_torch_dtype(self.train_config.dtype) + # sanity check + if self.sd.vae.dtype != self.sd.vae_torch_dtype: + self.sd.vae = self.sd.vae.to(self.sd.vae_torch_dtype) + if isinstance(self.sd.text_encoder, list): + for encoder in self.sd.text_encoder: + if encoder.dtype != self.sd.te_torch_dtype: + encoder.to(self.sd.te_torch_dtype) + else: + if self.sd.text_encoder.dtype != self.sd.te_torch_dtype: + self.sd.text_encoder.to(self.sd.te_torch_dtype) + + noisy_latents, noise, timesteps, conditioned_prompts, imgs = self.process_general_training_batch(batch) + if self.train_config.do_cfg or self.train_config.do_random_cfg: + # pick random negative prompts + if self.negative_prompt_pool is not None: + negative_prompts = [] + for i in range(noisy_latents.shape[0]): + num_neg = random.randint(1, self.train_config.max_negative_prompts) + this_neg_prompts = [random.choice(self.negative_prompt_pool) for _ in range(num_neg)] + this_neg_prompt = ', '.join(this_neg_prompts) + negative_prompts.append(this_neg_prompt) + self.batch_negative_prompt = negative_prompts + else: + self.batch_negative_prompt = ['' for _ in range(batch.latents.shape[0])] + + if self.adapter and isinstance(self.adapter, CustomAdapter): + # condition the prompt + # todo handle more than one adapter image + conditioned_prompts = self.adapter.condition_prompt(conditioned_prompts) + + network_weight_list = batch.get_network_weight_list() + if self.train_config.single_item_batching: + network_weight_list = network_weight_list + network_weight_list + + has_adapter_img = batch.control_tensor is not None + has_clip_image = batch.clip_image_tensor is not None + has_clip_image_embeds = batch.clip_image_embeds is not None + # force it to be true if doing regs as we handle those differently + if any([batch.file_items[idx].is_reg for idx in range(len(batch.file_items))]): + has_clip_image = True + if self._clip_image_embeds_unconditional is not None: + has_clip_image_embeds = True # we are caching embeds, handle that differently + has_clip_image = False + + if self.adapter is not None and isinstance(self.adapter, IPAdapter) and not has_clip_image and has_adapter_img: + raise ValueError( + "IPAdapter control image is now 'clip_image_path' instead of 'control_path'. 
Please update your dataset config ") + + match_adapter_assist = False + + # check if we are matching the adapter assistant + if self.assistant_adapter: + if self.train_config.match_adapter_chance == 1.0: + match_adapter_assist = True + elif self.train_config.match_adapter_chance > 0.0: + match_adapter_assist = torch.rand( + (1,), device=self.device_torch, dtype=dtype + ) < self.train_config.match_adapter_chance + + self.timer.stop('preprocess_batch') + + is_reg = False loss_multiplier = torch.ones((noisy_latents.shape[0], 1, 1, 1), device=self.device_torch, dtype=dtype) for idx, file_item in enumerate(batch.file_items): if file_item.is_reg: diff --git a/jobs/process/BaseSDTrainProcess.py b/jobs/process/BaseSDTrainProcess.py index 1e0393c0..313285fc 100644 --- a/jobs/process/BaseSDTrainProcess.py +++ b/jobs/process/BaseSDTrainProcess.py @@ -921,7 +921,10 @@ class BaseSDTrainProcess(BaseTrainProcess): noise = self.get_consistent_noise(latents, batch, dtype=dtype) else: if hasattr(self.sd, 'get_latent_noise_from_latents'): - noise = self.sd.get_latent_noise_from_latents(latents).to(self.device_torch, dtype=dtype) + noise = self.sd.get_latent_noise_from_latents( + latents, + noise_offset=self.train_config.noise_offset + ).to(self.device_torch, dtype=dtype) else: # get noise noise = self.sd.get_latent_noise( @@ -931,17 +934,6 @@ class BaseSDTrainProcess(BaseTrainProcess): batch_size=batch_size, noise_offset=self.train_config.noise_offset, ).to(self.device_torch, dtype=dtype) - - # if self.train_config.random_noise_shift > 0.0: - # # get random noise -1 to 1 - # noise_shift = torch.rand((noise.shape[0], noise.shape[1], 1, 1), device=noise.device, - # dtype=noise.dtype) * 2 - 1 - - # # multiply by shift amount - # noise_shift *= self.train_config.random_noise_shift - - # # add to noise - # noise += noise_shift if self.train_config.blended_blur_noise: noise = get_blended_blur_noise( @@ -1085,19 +1077,20 @@ class BaseSDTrainProcess(BaseTrainProcess): # we determine noise from the differential of the latents unaugmented_latents = self.sd.encode_images(batch.unaugmented_tensor) - batch_size = len(batch.file_items) - min_noise_steps = self.train_config.min_denoising_steps - max_noise_steps = self.train_config.max_denoising_steps - if self.model_config.refiner_name_or_path is not None: - # if we are not training the unet, then we are only doing refiner and do not need to double up - if self.train_config.train_unet: - max_noise_steps = round(self.train_config.max_denoising_steps * self.model_config.refiner_start_at) - do_double = True - else: - min_noise_steps = round(self.train_config.max_denoising_steps * self.model_config.refiner_start_at) - do_double = False + with self.timer('prepare_scheduler'): + + batch_size = len(batch.file_items) + min_noise_steps = self.train_config.min_denoising_steps + max_noise_steps = self.train_config.max_denoising_steps + if self.model_config.refiner_name_or_path is not None: + # if we are not training the unet, then we are only doing refiner and do not need to double up + if self.train_config.train_unet: + max_noise_steps = round(self.train_config.max_denoising_steps * self.model_config.refiner_start_at) + do_double = True + else: + min_noise_steps = round(self.train_config.max_denoising_steps * self.model_config.refiner_start_at) + do_double = False - with self.timer('prepare_noise'): num_train_timesteps = self.train_config.num_train_timesteps if self.train_config.noise_scheduler in ['custom_lcm']: @@ -1144,6 +1137,7 @@ class BaseSDTrainProcess(BaseTrainProcess): 
self.sd.noise_scheduler.set_timesteps( num_train_timesteps, device=self.device_torch ) + with self.timer('prepare_timesteps_indices'): content_or_style = self.train_config.content_or_style if is_reg: @@ -1193,20 +1187,26 @@ timestep_indices = torch.ones((batch_size,), device=self.device_torch) * min_noise_steps else: # todo, some schedulers use indices, others use timesteps. Not sure what to do here + min_idx = min_noise_steps + 1 + max_idx = max_noise_steps - 1 + if self.train_config.noise_scheduler == 'flowmatch': + # flowmatch uses indices, so we need to use indices + min_idx = 0 + max_idx = max_noise_steps - 1 timestep_indices = torch.randint( - min_noise_steps + 1, - max_noise_steps - 1, + min_idx, + max_idx, (batch_size,), device=self.device_torch ) timestep_indices = timestep_indices.long() else: raise ValueError(f"Unknown content_or_style {content_or_style}") - + with self.timer('convert_timestep_indices_to_timesteps'): # convert the timestep_indices to a timestep - timesteps = [self.sd.noise_scheduler.timesteps[x.item()] for x in timestep_indices] - timesteps = torch.stack(timesteps, dim=0) - + timesteps = self.sd.noise_scheduler.timesteps[timestep_indices.long()] + + with self.timer('prepare_noise'): # get noise noise = self.get_noise(latents, batch_size, dtype=dtype, batch=batch, timestep=timesteps) @@ -1240,6 +1240,8 @@ class BaseSDTrainProcess(BaseTrainProcess): device=noise.device, dtype=noise.dtype ) * self.train_config.random_noise_multiplier + + with self.timer('make_noisy_latents'): noise = noise * noise_multiplier diff --git a/toolkit/stable_diffusion_model.py b/toolkit/stable_diffusion_model.py index 068e747e..4bffc816 100644 --- a/toolkit/stable_diffusion_model.py +++ b/toolkit/stable_diffusion_model.py @@ -1763,6 +1763,15 @@ class StableDiffusion: ) noise = apply_noise_offset(noise, noise_offset) return noise + + def get_latent_noise_from_latents( + self, + latents: torch.Tensor, + noise_offset=0.0 + ): + noise = torch.randn_like(latents) + noise = apply_noise_offset(noise, noise_offset) + return noise def get_time_ids_from_latents(self, latents: torch.Tensor, requires_aesthetic_score=False): VAE_SCALE_FACTOR = 2 ** (len(self.vae.config['block_out_channels']) - 1) From 595a6f1735e46700703adcad8848d5d9ede20bde Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Tue, 17 Jun 2025 07:43:34 -0600 Subject: [PATCH 05/13] Initial setup for a cron worker on the ui for various tasks --- ui/cron/worker.ts | 31 ++ ui/package-lock.json | 654 +++++++++++++++++++++++++++++++++++++++- ui/package.json | 8 +- ui/prisma/schema.prisma | 10 + ui/tsconfig.worker.json | 15 + 5 files changed, 709 insertions(+), 9 deletions(-) create mode 100644 ui/cron/worker.ts create mode 100644 ui/tsconfig.worker.json diff --git a/ui/cron/worker.ts b/ui/cron/worker.ts new file mode 100644 index 00000000..589393a4 --- /dev/null +++ b/ui/cron/worker.ts @@ -0,0 +1,31 @@ +class CronWorker { + interval: number; + is_running: boolean; + intervalId: NodeJS.Timeout; + constructor() { + this.interval = 1000; // Default interval of 1 second + this.is_running = false; + this.intervalId = setInterval(() => { + this.run(); + }, this.interval); + } + async run() { + if (this.is_running) { + return; + } + this.is_running = true; + try { + // Loop logic here + await this.loop(); + } catch (error) { + console.error('Error in cron worker loop:', error); + } + this.is_running = false; + } + + async loop() {} +} + +// it automatically starts the loop const cronWorker = new 
CronWorker(); +console.log('Cron worker started with interval:', cronWorker.interval, 'ms'); diff --git a/ui/package-lock.json b/ui/package-lock.json index f20ef7a4..6bebedce 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -30,10 +30,12 @@ "@types/node": "^20", "@types/react": "^19", "@types/react-dom": "^19", + "concurrently": "^9.1.2", "postcss": "^8", "prettier": "^3.5.1", "prettier-basic": "^1.0.0", "tailwindcss": "^3.4.1", + "ts-node-dev": "^2.0.0", "typescript": "^5" } }, @@ -169,6 +171,28 @@ "node": ">=6.9.0" } }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@emnapi/runtime": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz", @@ -1168,6 +1192,30 @@ "node": ">= 6" } }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, "node_modules/@types/node": { "version": "20.17.19", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.19.tgz", @@ -1207,6 +1255,18 @@ "@types/react": "*" } }, + "node_modules/@types/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-xevGOReSYGM7g/kUBZzPqCrR/KYAo+F0yiPc85WFTJa0MSLtyFTVTU6cJu/aV4mid7IffDIWqo69THF2o4JiEQ==", + "dev": true + }, + "node_modules/@types/strip-json-comments": { + "version": "0.0.30", + "resolved": "https://registry.npmjs.org/@types/strip-json-comments/-/strip-json-comments-0.0.30.tgz", + "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", + "dev": true + }, "node_modules/abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", @@ -1214,6 
+1274,30 @@ "license": "ISC", "optional": true }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/agent-base": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", @@ -1465,6 +1549,12 @@ "ieee754": "^1.1.13" } }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, "node_modules/busboy": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", @@ -1626,6 +1716,49 @@ } ] }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/chokidar": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", @@ -1691,6 +1824,93 @@ "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", @@ -1782,8 +2002,33 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "license": "MIT", - "optional": true + "devOptional": true, + "license": "MIT" + }, + "node_modules/concurrently": { + "version": "9.1.2", + "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-9.1.2.tgz", + "integrity": "sha512-H9MWcoPsYddwbOGM6difjVwVZHl63nwMEwDJG/L7VGtuaJhb12h2caPG2tVPWs7emuYix252iGfqOyrz1GczTQ==", + "dev": true, + "dependencies": { + "chalk": "^4.1.2", + "lodash": "^4.17.21", + "rxjs": "^7.8.1", + "shell-quote": "^1.8.1", + "supports-color": "^8.1.1", + "tree-kill": "^1.2.2", + "yargs": "^17.7.2" + }, + "bin": { + "conc": "dist/bin/concurrently.js", + "concurrently": "dist/bin/concurrently.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/open-cli-tools/concurrently?sponsor=1" + } }, "node_modules/console-control-strings": { "version": "1.1.0", @@ -1820,6 +2065,12 @@ "node": ">= 6" } }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -1921,6 +2172,15 @@ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", "dev": true }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/dlv": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", @@ -1949,6 +2209,15 @@ "node": ">= 0.4" } }, + "node_modules/dynamic-dedupe": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/dynamic-dedupe/-/dynamic-dedupe-0.3.0.tgz", + "integrity": "sha512-ssuANeD+z97meYOqd50e04Ze5qp4bPqo8cCkI4TRjZkzAUgIDTrXV1R8QCdINpiI+hw14+rYazvTRdQrz0/rFQ==", + "dev": true, + "dependencies": { + "xtend": "^4.0.0" + } + }, "node_modules/eastasianwidth": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", @@ -2051,6 +2320,15 @@ "node": ">= 0.4" } }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -2225,8 +2503,8 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "license": "ISC", - "optional": true + "devOptional": true, + "license": "ISC" }, "node_modules/fsevents": { "version": "2.3.3", @@ -2322,6 +2600,15 @@ "node": ">=8" } }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, "node_modules/get-intrinsic": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.7.tgz", @@ -2421,6 +2708,15 @@ "license": "ISC", "optional": true }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", @@ -2598,8 +2894,8 @@ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "devOptional": true, "license": "ISC", - "optional": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2784,6 +3080,12 @@ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -2810,6 +3112,12 @@ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, "node_modules/make-fetch-happen": { "version": "9.1.0", "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", @@ -3499,8 +3807,8 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "devOptional": true, "license": "MIT", - "optional": true, "engines": { "node": ">=0.10.0" } @@ -4004,6 +4312,15 @@ "node": ">=8.10.0" } }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve": { "version": "1.22.10", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", @@ -4137,6 +4454,15 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -4247,6 +4573,18 @@ "node": ">=8" } }, + "node_modules/shell-quote": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz", + "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/signal-exit": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", @@ -4370,6 +4708,25 @@ "node": ">=0.10.0" } }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + 
"dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/sprintf-js": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", @@ -4545,6 +4902,15 @@ "node": ">=8" } }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, "node_modules/strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", @@ -4603,6 +4969,21 @@ "node": ">=16 || 14 >=14.17" } }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", @@ -4749,12 +5130,172 @@ "node": ">=8.0" } }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "dev": true, + "bin": { + "tree-kill": "cli.js" + } + }, "node_modules/ts-interface-checker": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", "dev": true }, + "node_modules/ts-node-dev": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-node-dev/-/ts-node-dev-2.0.0.tgz", + "integrity": "sha512-ywMrhCfH6M75yftYvrvNarLEY+SUXtUvU8/0Z6llrHQVBx12GiFk5sStF8UdfE/yfzk9IAq7O5EEbTQsxlBI8w==", + "dev": true, + "dependencies": { + "chokidar": "^3.5.1", + "dynamic-dedupe": "^0.3.0", + "minimist": "^1.2.6", + "mkdirp": "^1.0.4", + "resolve": "^1.0.0", + "rimraf": "^2.6.1", + "source-map-support": "^0.5.12", + "tree-kill": "^1.2.2", + "ts-node": "^10.4.0", + "tsconfig": "^7.0.0" + }, + "bin": { + "ts-node-dev": "lib/bin.js", + "tsnd": "lib/bin.js" + }, + "engines": { + "node": ">=0.8.0" + }, + "peerDependencies": { + "node-notifier": "*", + "typescript": "*" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/ts-node-dev/node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, + "node_modules/ts-node-dev/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/ts-node-dev/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ts-node-dev/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ts-node-dev/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/ts-node-dev/node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/tsconfig": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/tsconfig/-/tsconfig-7.0.0.tgz", + "integrity": "sha512-vZXmzPrL+EmC4T/4rVlT2jNVMWCi/O4DIiSj3UHg1OE5kCKbk4mfrXc6dZksLgRM/TZlKnousKH9bbTazUWRRw==", + "dev": true, + "dependencies": { + "@types/strip-bom": "^3.0.0", + "@types/strip-json-comments": "0.0.30", + "strip-bom": "^3.0.0", + "strip-json-comments": "^2.0.0" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", @@ -4829,6 +5370,12 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": 
"sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -4996,6 +5543,24 @@ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "license": "ISC" }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", @@ -5012,6 +5577,83 @@ "engines": { "node": ">= 14" } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": 
{ + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "engines": { + "node": ">=6" + } } } } diff --git a/ui/package.json b/ui/package.json index 43e0d861..de04bd2b 100644 --- a/ui/package.json +++ b/ui/package.json @@ -3,9 +3,9 @@ "version": "0.1.0", "private": true, "scripts": { - "dev": "next dev --turbopack", - "build": "next build", - "start": "next start --port 8675", + "dev": "concurrently -k -n WORKER,UI \"ts-node-dev --respawn --watch cron --transpile-only cron/worker.ts\" \"next dev --turbopack\"", + "build": "tsc -p tsconfig.worker.json && next build", + "start": "concurrently --restart-tries -1 --restart-after 1000 -n WORKER,UI \"node dist/worker.js\" \"next start --port 8675\"", "build_and_start": "npm install && npm run update_db && npm run build && npm run start", "lint": "next lint", "update_db": "npx prisma generate && npx prisma db push", @@ -34,10 +34,12 @@ "@types/node": "^20", "@types/react": "^19", "@types/react-dom": "^19", + "concurrently": "^9.1.2", "postcss": "^8", "prettier": "^3.5.1", "prettier-basic": "^1.0.0", "tailwindcss": "^3.4.1", + "ts-node-dev": "^2.0.0", "typescript": "^5" }, "prettier": "prettier-basic" diff --git a/ui/prisma/schema.prisma b/ui/prisma/schema.prisma index 1489e26e..96f6399b 100644 --- a/ui/prisma/schema.prisma +++ b/ui/prisma/schema.prisma @@ -26,3 +26,13 @@ model Job { info String @default("") speed_string String @default("") } + +model Queue { + id String @id @default(uuid()) + channel String + job_id String + created_at DateTime @default(now()) + updated_at DateTime @updatedAt + status String @default("waiting") + @@index([job_id, channel]) +} \ No newline at end of file diff --git a/ui/tsconfig.worker.json b/ui/tsconfig.worker.json new file mode 100644 index 00000000..6b4d9531 --- /dev/null +++ b/ui/tsconfig.worker.json @@ -0,0 +1,15 @@ +{ + // tsconfig.worker.json + "compilerOptions": { + "module": "commonjs", + "target": "es2020", + "outDir": "dist", + "moduleResolution": "node", + "types": [ + "node" + ] + }, + "include": [ + "cron/**/*.ts" + ] +} \ No newline at end of file From ff617fdaea3edc2a87a7151fba05e8599980407d Mon Sep 17 00:00:00 2001 From: Jaret Burkett Date: Tue, 17 Jun 2025 11:00:24 -0600 Subject: [PATCH 06/13] Started doing info bubble docs on the simple ui --- README.md | 1 + ui/src/app/jobs/new/SimpleJob.tsx | 34 +++++++++-------- ui/src/app/layout.tsx | 2 + ui/src/components/DocModal.tsx | 59 +++++++++++++++++++++++++++++ ui/src/components/formInputs.tsx | 62 +++++++++++++++++++++++++++---- ui/src/docs.tsx | 60 ++++++++++++++++++++++++++++++ ui/src/types.ts | 9 ++++- 7 files changed, 201 insertions(+), 26 deletions(-) create mode 100644 ui/src/components/DocModal.tsx create mode 100644 ui/src/docs.tsx diff --git a/README.md b/README.md index 5b4ae070..f91dcd13 100644 --- a/README.md +++ b/README.md @@ -419,6 +419,7 @@ Everything else should work the same including layer targeting. ### June 17, 2024 - Performance optimizations for batch preparation +- Added some docs via a popup for items in the simple ui explaining what settings do. 
Still a WIP ### June 16, 2024 - Hide control images in the UI when viewing datasets diff --git a/ui/src/app/jobs/new/SimpleJob.tsx b/ui/src/app/jobs/new/SimpleJob.tsx index ca587564..0b8b3e47 100644 --- a/ui/src/app/jobs/new/SimpleJob.tsx +++ b/ui/src/app/jobs/new/SimpleJob.tsx @@ -47,6 +47,7 @@ export default function SimpleJob({ setJobConfig(value, 'config.name')} placeholder="Enter training name" disabled={runId !== null} @@ -55,12 +56,14 @@ export default function SimpleJob({ setGpuIDs(value)} options={gpuList.map((gpu: any) => ({ value: `${gpu.index}`, label: `GPU #${gpu.index}` }))} /> { if (value?.trim() === '') { value = null; @@ -120,6 +123,7 @@ export default function SimpleJob({ { if (value?.trim() === '') { value = null; @@ -185,22 +189,20 @@ export default function SimpleJob({ max={1024} required /> - { - modelArch?.disableSections?.includes('network.conv') ? null : ( - { - console.log('onChange', value); - setJobConfig(value, 'config.process[0].network.conv'); - setJobConfig(value, 'config.process[0].network.conv_alpha'); - }} - placeholder="eg. 16" - min={0} - max={1024} - /> - ) - } + {modelArch?.disableSections?.includes('network.conv') ? null : ( + { + console.log('onChange', value); + setJobConfig(value, 'config.process[0].network.conv'); + setJobConfig(value, 'config.process[0].network.conv_alpha'); + }} + placeholder="eg. 16" + min={0} + max={1024} + /> + )} )} diff --git a/ui/src/app/layout.tsx b/ui/src/app/layout.tsx index 50f8d79a..e45363f9 100644 --- a/ui/src/app/layout.tsx +++ b/ui/src/app/layout.tsx @@ -7,6 +7,7 @@ import ConfirmModal from '@/components/ConfirmModal'; import SampleImageModal from '@/components/SampleImageModal'; import { Suspense } from 'react'; import AuthWrapper from '@/components/AuthWrapper'; +import DocModal from '@/components/DocModal'; export const dynamic = 'force-dynamic'; @@ -38,6 +39,7 @@ export default function RootLayout({ children }: { children: React.ReactNode }) + diff --git a/ui/src/components/DocModal.tsx b/ui/src/components/DocModal.tsx new file mode 100644 index 00000000..bfdd6bf4 --- /dev/null +++ b/ui/src/components/DocModal.tsx @@ -0,0 +1,59 @@ +'use client'; +import { createGlobalState } from 'react-global-hooks'; +import { Dialog, DialogBackdrop, DialogPanel, DialogTitle } from '@headlessui/react'; +import React from 'react'; +import { ConfigDoc } from '@/types'; + +export const docState = createGlobalState(null); + +export const openDoc = (doc: ConfigDoc) => { + docState.set({ ...doc }); +}; + +export default function DocModal() { + const [doc, setDoc] = docState.use(); + const isOpen = !!doc; + + const onClose = () => { + setDoc(null); + }; + + return ( + + + +
+      <DialogBackdrop
+        transition
+        className="fixed inset-0 bg-black/75 transition-opacity data-[closed]:opacity-0 data-[enter]:duration-300 data-[leave]:duration-200 data-[enter]:ease-out data-[leave]:ease-in"
+      />
+      <div className="fixed inset-0 z-[10001] w-screen overflow-y-auto">
+        <div className="flex min-h-full items-end justify-center p-4 text-center sm:items-center sm:p-0">
+          <DialogPanel
+            transition
+            className="relative transform overflow-hidden rounded-lg bg-gray-800 text-left shadow-xl transition-all data-[closed]:translate-y-4 data-[closed]:opacity-0 data-[enter]:duration-300 data-[leave]:duration-200 sm:my-8 sm:w-full sm:max-w-lg data-[closed]:sm:translate-y-0 data-[closed]:sm:scale-95"
+          >
+            <div className="bg-gray-800 px-4 pb-4 pt-5 sm:p-6 sm:pb-4">
+              <DialogTitle as="h3" className="text-base font-semibold text-gray-100">
+                {doc?.title || 'Confirm Action'}
+              </DialogTitle>
+              <div className="mt-2 text-sm text-gray-300">{doc?.description}</div>
+            </div>
+            <div className="bg-gray-900 px-4 py-3 sm:flex sm:flex-row-reverse sm:px-6">
+              <button
+                type="button"
+                onClick={onClose}
+                className="inline-flex w-full justify-center rounded-md bg-gray-700 px-3 py-2 text-sm font-semibold text-gray-100 shadow-sm hover:bg-gray-600 sm:ml-3 sm:w-auto"
+              >
+                Close
+              </button>
+            </div>
+          </DialogPanel>
+        </div>
+      </div>
+    </Dialog>
+ ); +} diff --git a/ui/src/components/formInputs.tsx b/ui/src/components/formInputs.tsx index a9908bd0..a556a266 100644 --- a/ui/src/components/formInputs.tsx +++ b/ui/src/components/formInputs.tsx @@ -3,6 +3,10 @@ import React, { forwardRef } from 'react'; import classNames from 'classnames'; import dynamic from 'next/dynamic'; +import { CircleHelp } from 'lucide-react'; +import { getDoc } from '@/docs'; +import { openDoc } from '@/components/DocModal'; + const Select = dynamic(() => import('react-select'), { ssr: false }); const labelClasses = 'block text-xs mb-1 mt-2 text-gray-300'; @@ -11,6 +15,7 @@ const inputClasses = export interface InputProps { label?: string; + docKey?: string; className?: string; placeholder?: string; required?: boolean; @@ -24,10 +29,20 @@ export interface TextInputProps extends InputProps { } export const TextInput = forwardRef( - ({ label, value, onChange, placeholder, required, disabled, type = 'text', className }, ref) => { + ({ label, value, onChange, placeholder, required, disabled, type = 'text', className, docKey = null }, ref) => { + const doc = getDoc(docKey); return (
-        {label && <label className={labelClasses}>{label}</label>}
+        {label && (
+          <label className={labelClasses}>
+            {label}
+            {doc && (
+              <CircleHelp
+                className="inline ml-1 cursor-pointer text-gray-500 hover:text-gray-300"
+                size={14}
+                onClick={() => openDoc(doc)}
+              />
+            )}
+          </label>
+        )}
         <input
@@ -52,7 +68,8 @@ export const NumberInput = (props: NumberInputProps) => {
-  const { label, value, onChange, placeholder, required, min, max } = props;
+  const { label, value, onChange, placeholder, required, min, max, docKey = null } = props;
+  const doc = getDoc(docKey);
 
   // Add controlled internal state to properly handle partial inputs
   const [inputValue, setInputValue] = React.useState(value ?? '');
@@ -68,7 +84,16 @@
   return (
     <div className={className}>
-      {label && <label className={labelClasses}>{label}</label>}
+      {label && (
+        <label className={labelClasses}>
+          {label}
+          {doc && (
+            <CircleHelp
+              className="inline ml-1 cursor-pointer text-gray-500 hover:text-gray-300"
+              size={14}
+              onClick={() => openDoc(doc)}
+            />
+          )}
+        </label>
+      )}
       <input
@@ -96,14 +121,24 @@ export const SelectInput = (props: SelectInputProps) => {
-  const { label, value, onChange, options } = props;
+  const { label, value, onChange, options, docKey = null } = props;
+  const doc = getDoc(docKey);
   const selectedOption = options.find(option => option.value === value);
 
   return (
     <div
       className={classNames(className, {
         'opacity-30 cursor-not-allowed': props.disabled,
       })}
     >
-      {label && <label className={labelClasses}>{label}</label>}
+      {label && (
+        <label className={labelClasses}>
+          {label}
+          {doc && (
+            <CircleHelp
+              className="inline ml-1 cursor-pointer text-gray-500 hover:text-gray-300"
+              size={14}
+              onClick={() => openDoc(doc)}
+            />
+          )}
+        </label>
+      )}
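
Putting the hunks above together, the flow is: a form input declares a docKey; getDoc (from the new ui/src/docs.tsx) resolves it to a ConfigDoc or null; when a doc exists, a CircleHelp icon renders next to the label, and clicking it calls openDoc, which sets the global docState that the single DocModal mounted in layout.tsx watches. A minimal consumer sketch follows; the 'config.name' key and the label text are illustrative assumptions, not taken from the patch:

// Hypothetical usage sketch, not part of the patch. Any input that passes a
// docKey registered in ui/src/docs.tsx gets a clickable help icon next to
// its label; inputs without a docKey render exactly as before.
import React, { useState } from 'react';
import { TextInput } from '@/components/formInputs';

export default function ExampleField() {
  const [name, setName] = useState('');
  return (
    <TextInput
      label="Training Name"
      docKey="config.name" // assumed key; the icon only appears if getDoc finds it
      value={name}
      onChange={(value: string) => setName(value)}
      placeholder="Enter training name"
    />
  );
}

Because docState comes from createGlobalState in react-global-hooks, the one <DocModal /> instance serves every input on the page; no per-field modal state is needed.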