Save embeddings under their trigger word to match auto and comfy style loading. Also, FINALLY found why gradients were wonky and fixed it. The root problem was dropping out of network state before the backward pass.
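The gradient failure mode described here is easiest to see with gradient checkpointing, which re-runs the forward pass inside backward(); if the network state is toggled off in between, the recomputed activations no longer match the graph that was recorded. A minimal sketch of that class of bug (ToggleLinear and the active flag are illustrative stand-ins, not this repo's actual classes):

import torch
from torch.utils.checkpoint import checkpoint

class ToggleLinear(torch.nn.Module):
    """LoRA-style wrapper whose forward depends on mutable network state."""
    def __init__(self):
        super().__init__()
        self.base = torch.nn.Linear(4, 4)
        self.delta = torch.nn.Linear(4, 4, bias=False)  # trainable adapter path
        self.active = True  # the "network state" flag

    def forward(self, x):
        out = self.base(x)
        if self.active:
            out = out + self.delta(x)
        return out

module = ToggleLinear()
x = torch.randn(2, 4, requires_grad=True)

# Checkpointing discards intermediate activations and re-runs forward()
# during backward() to recompute them.
y = checkpoint(module, x, use_reentrant=True)
loss = y.sum()

# BUG: dropping out of network state before the backward pass. The re-run
# forward now skips the delta path, so the adapter never enters the
# recomputed graph and its gradients are silently lost.
module.active = False
loss.backward()
print(module.delta.weight.grad)  # None: the adapter received no gradient

# FIX: leave the network state active until backward() has completed.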

Jaret Burkett
2023-09-09 12:02:07 -06:00
parent 408c50ead1
commit be804c9cf5
5 changed files with 64 additions and 71 deletions

@@ -36,12 +36,7 @@ class LoConSpecialModule(ToolkitModuleMixin, LoConModule):
         # call super of super
         torch.nn.Module.__init__(self)
         # call super of
-        super().__init__(
-            org_module=org_module,
-            call_super_init=False,
-            parent=parent,
-            **kwargs
-        )
+        super().__init__(call_super_init=False)
         self.lora_name = lora_name
         self.lora_dim = lora_dim
         self.cp = False
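
On the embedding-naming half of the commit message: A1111 and ComfyUI activate a textual-inversion embedding by its filename, so saving the file under the trigger word lets the trigger work in prompts as-is. A hedged sketch of that convention (save_embedding and the emb_params key are assumptions based on the common SD1.x safetensors embedding layout, not necessarily this repo's exact format):

import os
import torch
from safetensors.torch import save_file

def save_embedding(emb: torch.Tensor, trigger: str, out_dir: str) -> str:
    """Save a textual-inversion embedding under its trigger word.

    A1111 and ComfyUI look up embeddings by filename, so naming the
    file after the trigger makes it loadable without extra config.
    """
    path = os.path.join(out_dir, f"{trigger}.safetensors")
    # 'emb_params' mirrors the widely used SD1.x safetensors embedding
    # key (an assumption here, not verified against this repo's code).
    save_file({"emb_params": emb.contiguous()}, path)
    return path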