Added support for full fine-tuning of Flux with randomized parameter activation. Examples coming soon

Jaret Burkett
2024-11-21 13:05:32 -07:00
parent 894374b2e9
commit 96d418bb95
4 changed files with 194 additions and 8 deletions


@@ -389,6 +389,10 @@ class TrainConfig:
        # will cache a blank prompt or the trigger word, and unload the text encoder to cpu
        # will make training faster and use less vram
        self.unload_text_encoder = kwargs.get('unload_text_encoder', False)
        # for swapping which parameters are trained during training
        self.do_paramiter_swapping = kwargs.get('do_paramiter_swapping', False)
        # 0.1 means 10% of the parameters are active at a time; lower uses less vram, higher uses more
        self.paramiter_swapping_factor = kwargs.get('paramiter_swapping_factor', 0.1)
class ModelConfig:
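These two options only record configuration; the trainer code that consumes them is not part of this hunk. As a rough illustration of the idea behind "randomized param activation", the following is a minimal sketch of how a paramiter_swapping_factor of 0.1 could translate into training roughly 10% of the parameters at a time (swap_active_params is a hypothetical helper, not code from this commit):

import random

from torch import nn


def swap_active_params(model: nn.Module, factor: float = 0.1) -> None:
    # Hypothetical helper: randomly mark roughly `factor` of the model's
    # parameters as trainable and freeze the rest, so only a small subset
    # accumulates gradients and optimizer state at any given time.
    for p in model.parameters():
        p.requires_grad_(random.random() < factor)


# rough usage: rotate the active subset every few optimizer steps
# if step % swap_every == 0:
#     swap_active_params(transformer, factor=train_config.paramiter_swapping_factor)

The actual implementation may swap parameters at the optimizer level rather than toggling requires_grad; this sketch only shows why a lower factor trades training coverage per step for lower VRAM use.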
@@ -898,4 +902,16 @@ class GenerateImageConfig:
        if self.logger is None:
            return
        self.logger.log_image(image, count, self.prompt)

def validate_configs(
    train_config: TrainConfig,
    model_config: ModelConfig,
    save_config: SaveConfig,
):
    if model_config.is_flux:
        if save_config.save_format != 'diffusers':
            # make it diffusers
            save_config.save_format = 'diffusers'
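How these configs are wired together before training is not shown in this diff. A minimal usage sketch of the new validation step (the constructor arguments below are assumptions for illustration, not taken from this commit):

# build configs, then validate them before training starts
train_config = TrainConfig(do_paramiter_swapping=True, paramiter_swapping_factor=0.1)
model_config = ModelConfig()  # assume this resolves to model_config.is_flux == True for a Flux run
save_config = SaveConfig()    # assume a non-'diffusers' default save_format

validate_configs(train_config, model_config, save_config)
# for a Flux model, save_config.save_format has now been forced to 'diffusers'

The effect is that full fine-tunes of Flux are always written out in the diffusers layout, regardless of the save format the user requested.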