Added flux training instructions

This commit is contained in:
Jaret Burkett
2024-08-10 14:10:02 -06:00
parent b3e03295ad
commit 2308ef2868
4 changed files with 128 additions and 23 deletions

View File

@@ -128,7 +128,7 @@ class NetworkConfig:
if self.lorm_config.do_conv:
self.conv = 4
self.transformer_only = kwargs.get('transformer_only', False)
self.transformer_only = kwargs.get('transformer_only', True)
AdapterTypes = Literal['t2i', 'ip', 'ip+', 'clip', 'ilora', 'photo_maker', 'control_net']

View File

@@ -1349,7 +1349,6 @@ class FluxWithCFGPipeline(FluxPipeline):
noise_pred_text = self.transformer(
hidden_states=latents,
# YiYi notes: divide it by 1000 for now because we scale it by 1000 in the transformer model (we should not keep it but I want to keep the inputs same for the model for testing)
timestep=timestep / 1000,
guidance=guidance,
pooled_projections=pooled_prompt_embeds,
@@ -1363,7 +1362,6 @@ class FluxWithCFGPipeline(FluxPipeline):
# todo combine these
noise_pred_uncond = self.transformer(
hidden_states=latents,
# YiYi notes: divide it by 1000 for now because we scale it by 1000 in the transformer model (we should not keep it but I want to keep the inputs same for the model for testing)
timestep=timestep / 1000,
guidance=guidance,
pooled_projections=negative_pooled_prompt_embeds,
@@ -1376,8 +1374,6 @@ class FluxWithCFGPipeline(FluxPipeline):
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents_dtype = latents.dtype
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]