Add code to handle diffusion feature extraction loss

This commit is contained in:
Jaret Burkett
2025-01-21 14:21:34 -07:00
parent 6a8e3d8610
commit 29122b1a54
3 changed files with 79 additions and 2 deletions

View File

@@ -32,6 +32,7 @@ from torchvision import transforms
from diffusers import EMAModel
import math
from toolkit.train_tools import precondition_model_outputs_flow_match
from toolkit.models.diffusion_feature_extraction import DiffusionFeatureExtractor, load_dfe
def flush():
@@ -78,6 +79,8 @@ class SDTrainer(BaseSDTrainProcess):
self.cached_blank_embeds: Optional[PromptEmbeds] = None
self.cached_trigger_embeds: Optional[PromptEmbeds] = None
self.dfe: Optional[DiffusionFeatureExtractor] = None
def before_model_load(self):
@@ -178,6 +181,11 @@ class SDTrainer(BaseSDTrainProcess):
# move back to cpu
self.sd.text_encoder_to('cpu')
flush()
if self.train_config.diffusion_feature_extractor_path is not None:
self.dfe = load_dfe(self.train_config.diffusion_feature_extractor_path)
self.dfe.to(self.device_torch)
self.dfe.eval()
def process_output_for_turbo(self, pred, noisy_latents, timesteps, noise, batch):
@@ -285,6 +293,7 @@ class SDTrainer(BaseSDTrainProcess):
):
loss_target = self.train_config.loss_target
is_reg = any(batch.get_is_reg_list())
additional_loss = 0.0
prior_mask_multiplier = None
target_mask_multiplier = None
@@ -367,7 +376,17 @@ class SDTrainer(BaseSDTrainProcess):
target = (noise - batch.latents).detach()
else:
target = noise
if self.dfe is not None:
# do diffusion feature extraction on target
with torch.no_grad():
rectified_flow_target = noise.float() - batch.latents.float()
target_features = self.dfe(torch.cat([rectified_flow_target, noise.float()], dim=1))
# do diffusion feature extraction on prediction
pred_features = self.dfe(torch.cat([noise_pred.float(), noise.float()], dim=1))
additional_loss += torch.nn.functional.mse_loss(pred_features, target_features, reduction="mean")
if target is None:
target = noise
@@ -487,7 +506,7 @@ class SDTrainer(BaseSDTrainProcess):
loss = loss + norm_std_loss
return loss
return loss + additional_loss
def preprocess_batch(self, batch: 'DataLoaderBatchDTO'):
    """Per-batch preprocessing hook; the base implementation is an identity pass-through.

    Subclasses may override this to transform a batch before it is used for
    a training step. Returns the batch object unchanged here.
    """
    return batch