mirror of https://github.com/ostris/ai-toolkit.git (synced 2026-01-26 16:39:47 +00:00)
Added support for full finetuning Flux with randomized parameter activation. Examples coming soon.
@@ -389,6 +389,10 @@ class TrainConfig:
        # will cache a blank prompt or the trigger word, and unload the text encoder to cpu
        # will make training faster and use less vram
        self.unload_text_encoder = kwargs.get('unload_text_encoder', False)
        # for swapping which parameters are trained during training
        self.do_paramiter_swapping = kwargs.get('do_paramiter_swapping', False)
        # 0.1 is 10% of the parameters active at a time lower is less vram, higher is more
        self.paramiter_swapping_factor = kwargs.get('paramiter_swapping_factor', 0.1)


class ModelConfig:
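The new options add two independent VRAM savers to TrainConfig: unload_text_encoder caches a blank prompt or the trigger word and moves the text encoder to CPU, while do_paramiter_swapping trains only a randomly chosen fraction (paramiter_swapping_factor) of the parameters at a time. The swapping logic itself is not part of this hunk; purely as a sketch of the idea (the function name swap_active_params and the per-tensor requires_grad_ toggling below are assumptions, not the toolkit's actual implementation):

```python
import random
import torch

def swap_active_params(model: torch.nn.Module, factor: float = 0.1) -> None:
    # Illustrative sketch only: mark roughly `factor` of the parameter tensors
    # as trainable and freeze the rest. A lower factor keeps fewer gradients
    # and optimizer states resident, so it uses less VRAM.
    params = list(model.parameters())
    n_active = max(1, min(len(params), int(len(params) * factor)))
    active = set(random.sample(range(len(params)), n_active))
    for i, p in enumerate(params):
        p.requires_grad_(i in active)
```

Re-running something like this every few optimizer steps would rotate which slice of the network receives updates, so the whole model is eventually finetuned without ever holding gradients for all parameters at once.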
@@ -898,4 +902,16 @@ class GenerateImageConfig:
        if self.logger is None:
            return

        self.logger.log_image(image, count, self.prompt)


def validate_configs(
    train_config: TrainConfig,
    model_config: ModelConfig,
    save_config: SaveConfig,
):
    if model_config.is_flux:
        if save_config.save_format != 'diffusers':
            # make it diffusers
            save_config.save_format = 'diffusers'
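validate_configs acts as a pre-flight check on the combined configs; for Flux models it silently overrides the save format to 'diffusers'. A minimal usage sketch, assuming the config constructors accept the same keyword names the diff reads via kwargs.get (the ModelConfig and SaveConfig kwargs shown are assumptions):

```python
# Hypothetical wiring; the exact ModelConfig / SaveConfig kwargs are assumptions.
train_config = TrainConfig(
    unload_text_encoder=True,       # cache prompt embeds, move text encoder to cpu
    do_paramiter_swapping=True,     # train a random subset of params at a time
    paramiter_swapping_factor=0.1,  # roughly 10% of params active per swap
)
model_config = ModelConfig(is_flux=True)             # assumed kwarg
save_config = SaveConfig(save_format='safetensors')  # assumed kwarg

validate_configs(train_config, model_config, save_config)
assert save_config.save_format == 'diffusers'  # forced back to diffusers for flux
```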