fixed huge flux training bug. Added ability to use an assistant lora

Jaret Burkett
2024-08-14 10:14:13 -06:00
parent e07bf11727
commit 7fed4ea761
4 changed files with 124 additions and 49 deletions

@@ -263,7 +263,7 @@ class ToolkitModuleMixin:
         if isinstance(x, QTensor):
             x = x.dequantize()
         # always cast to float32
-        lora_input = x.float()
+        lora_input = x.to(self.lora_down.weight.dtype)
         lora_output = self._call_forward(lora_input)
         multiplier = self.network_ref().torch_multiplier
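The one-line change above points at a dtype mismatch: with quantized Flux base weights, the activation is dequantized and was then forced to float32, while the LoRA down/up weights are typically kept in a lower-precision dtype such as bfloat16, which can trigger a dtype error (or silent upcasting) in the LoRA linear layers. Casting to self.lora_down.weight.dtype keeps the activation and the LoRA weights in agreement. Below is a minimal, self-contained sketch of the same pattern; the LoRALayer class, its constructor arguments, and the multiplier handling are illustrative stand-ins, not the repository's actual ToolkitModuleMixin.

import torch
import torch.nn as nn


class LoRALayer(nn.Module):
    """Illustrative LoRA adapter; names beyond lora_down/lora_up are hypothetical."""

    def __init__(self, in_features, out_features, rank=16, dtype=torch.bfloat16):
        super().__init__()
        # LoRA weights are commonly held in a reduced-precision dtype during training
        self.lora_down = nn.Linear(in_features, rank, bias=False, dtype=dtype)
        self.lora_up = nn.Linear(rank, out_features, bias=False, dtype=dtype)
        self.multiplier = 1.0

    def forward(self, x):
        # Match the (possibly dequantized) activation to the LoRA weight dtype
        # rather than hard-coding float32, so the matmul dtypes agree.
        lora_input = x.to(self.lora_down.weight.dtype)
        lora_output = self.lora_up(self.lora_down(lora_input))
        return lora_output * self.multiplier


# Example: a float32 activation is cast down to match the bf16 LoRA weights.
layer = LoRALayer(64, 64)
out = layer(torch.randn(2, 64, dtype=torch.float32))

Matching the input to the adapter's own dtype, rather than a hard-coded float32, also keeps the pattern valid if the LoRA weights are later trained in a different precision.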