diff --git a/extensions_built_in/sd_trainer/SDTrainer.py b/extensions_built_in/sd_trainer/SDTrainer.py
index a4b1c2d..a9ea4e4 100644
--- a/extensions_built_in/sd_trainer/SDTrainer.py
+++ b/extensions_built_in/sd_trainer/SDTrainer.py
@@ -1086,6 +1086,7 @@ class SDTrainer(BaseSDTrainProcess):
                     is_training=True,
                     has_been_preprocessed=True,
                     quad_count=quad_count,
+                    batch_size=noisy_latents.shape[0]
                 )
 
         with self.timer('encode_prompt'):
diff --git a/toolkit/custom_adapter.py b/toolkit/custom_adapter.py
index eaf443a..a414e69 100644
--- a/toolkit/custom_adapter.py
+++ b/toolkit/custom_adapter.py
@@ -705,10 +705,11 @@ class CustomAdapter(torch.nn.Module):
             is_training=False,
             has_been_preprocessed=False,
             quad_count=4,
+            batch_size=1,
     ) -> PromptEmbeds:
         if self.adapter_type == 'ilora' or self.adapter_type == 'vision_direct' or self.adapter_type == 'te_augmenter':
             if tensors_0_1 is None:
-                tensors_0_1 = self.get_empty_clip_image(1)
+                tensors_0_1 = self.get_empty_clip_image(batch_size)
                 has_been_preprocessed = True
 
         with torch.no_grad():
diff --git a/toolkit/models/ilora.py b/toolkit/models/ilora.py
index a41e815..6bfbf0f 100644
--- a/toolkit/models/ilora.py
+++ b/toolkit/models/ilora.py
@@ -78,7 +78,13 @@ class InstantLoRAMidModule(torch.nn.Module):
                 raise e
         # apply tanh to limit values to -1 to 1
         # scaler = torch.tanh(scaler)
-        return x * scaler
+        try:
+            return x * scaler
+        except Exception as e:
+            print(e)
+            print(x.shape)
+            print(scaler.shape)
+            raise e
 
 
 class InstantLoRAModule(torch.nn.Module):
diff --git a/toolkit/stable_diffusion_model.py b/toolkit/stable_diffusion_model.py
index adf91d3..aaf04cd 100644
--- a/toolkit/stable_diffusion_model.py
+++ b/toolkit/stable_diffusion_model.py
@@ -39,7 +39,7 @@ from toolkit.pipelines import CustomStableDiffusionXLPipeline, CustomStableDiffu
     StableDiffusionKDiffusionXLPipeline, StableDiffusionXLRefinerPipeline
 from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, T2IAdapter, DDPMScheduler, \
     StableDiffusionXLAdapterPipeline, StableDiffusionAdapterPipeline, DiffusionPipeline, \
-    StableDiffusionXLImg2ImgPipeline, LCMScheduler, Transformer2DModel, AutoencoderTiny
+    StableDiffusionXLImg2ImgPipeline, LCMScheduler, Transformer2DModel, AutoencoderTiny, ControlNetModel
 import diffusers
 from diffusers import \
     AutoencoderKL, \
@@ -142,7 +142,7 @@ class StableDiffusion:
 
         # to hold network if there is one
         self.network = None
-        self.adapter: Union['T2IAdapter', 'IPAdapter', 'ReferenceAdapter', None] = None
+        self.adapter: Union['ControlNetModel', 'T2IAdapter', 'IPAdapter', 'ReferenceAdapter', None] = None
         self.is_xl = model_config.is_xl
         self.is_v2 = model_config.is_v2
         self.is_ssd = model_config.is_ssd