Cleanup and small bug fixes

Jaret Burkett
2023-08-29 05:45:49 -06:00
parent a008d9e63b
commit bd758ff203
3 changed files with 15 additions and 2 deletions


@@ -177,6 +177,14 @@ class BaseSDTrainProcess(BaseTrainProcess):
         )
         o_dict['ss_output_name'] = self.job.name
+
+        if self.trigger_word is not None:
+            # just so auto1111 will pick it up
+            o_dict['ss_tag_frequency'] = {
+                'actfig': {
+                    'actfig': 1
+                }
+            }
         self.add_meta(o_dict)

     def get_training_info(self):
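
Note on this hunk: the in-diff comment says the dummy ss_tag_frequency entry exists "just so auto1111 will pick it up", since the AUTOMATIC1111 web UI reads kohya-style metadata from a LoRA's safetensors header to suggest activation text. The sketch below shows one way such metadata could end up in a file; the save_lora_with_trigger helper is hypothetical, it keys the map by the actual trigger word instead of the fixed 'actfig' placeholder, and it JSON-encodes the nested dict because safetensors header values must be strings (the commit itself leaves serialization to self.add_meta and the save path).

import json
import torch
from safetensors.torch import save_file

def save_lora_with_trigger(tensors, path, trigger_word, output_name):
    # Hypothetical helper: write LoRA weights with kohya-style metadata so
    # UIs that read ss_tag_frequency (e.g. auto1111) can surface the trigger word.
    # safetensors metadata values must be strings, so the nested dict is JSON-encoded.
    metadata = {
        'ss_output_name': output_name,
        'ss_tag_frequency': json.dumps({trigger_word: {trigger_word: 1}}),
    }
    save_file(tensors, path, metadata=metadata)

# usage sketch with dummy weights
save_lora_with_trigger(
    {'lora_up.weight': torch.zeros(4, 8), 'lora_down.weight': torch.zeros(8, 4)},
    'my_lora.safetensors',
    trigger_word='actfig',
    output_name='my_lora',
)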


@@ -213,9 +213,12 @@ class LoRAModule(torch.nn.Module):
         device = state_dict['lora_up.weight'].device
         # todo should we do this at fp32?
-        total_module_scale = torch.tensor(self.normalize_scaler / target_normalize_scaler) \
-            .to(device, dtype=dtype)
+        if isinstance(self.normalize_scaler, torch.Tensor):
+            scaler = self.normalize_scaler.clone().detach()
+        else:
+            scaler = torch.tensor(self.normalize_scaler).to(device, dtype=dtype)
+        total_module_scale = scaler / target_normalize_scaler
         num_modules_layers = 2  # up and down
         up_down_scale = torch.pow(total_module_scale, 1.0 / num_modules_layers) \
             .to(device, dtype=dtype)
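
Note on this hunk: the fix avoids calling torch.tensor() on a value that may already be a tensor, which PyTorch warns about and which clone().detach() handles cleanly; a plain float still takes the torch.tensor path. For context on what the surrounding lines compute, here is a self-contained sketch of the rescaling step, assuming the usual LoRA layout where the effective weight delta is lora_up @ lora_down, so scaling both factors by the square root of the module scale scales their product by the full amount. The apply_module_scale function and the final multiplications are illustrative, not the module's actual API.

import torch

def apply_module_scale(state_dict, normalize_scaler, target_normalize_scaler, dtype=torch.float32):
    # Illustrative sketch of the patched logic: split a per-module scale evenly
    # across the up and down LoRA weights.
    device = state_dict['lora_up.weight'].device
    if isinstance(normalize_scaler, torch.Tensor):
        # already a tensor (e.g. carried over from training state): don't re-wrap it
        scaler = normalize_scaler.clone().detach().to(device, dtype=dtype)
    else:
        scaler = torch.tensor(normalize_scaler).to(device, dtype=dtype)
    total_module_scale = scaler / target_normalize_scaler
    num_module_layers = 2  # lora_up and lora_down
    up_down_scale = torch.pow(total_module_scale, 1.0 / num_module_layers).to(device, dtype=dtype)
    # (lora_up * s) @ (lora_down * s) == (lora_up @ lora_down) * s**2, so each
    # factor gets the square root of the total module scale
    state_dict['lora_up.weight'] = state_dict['lora_up.weight'] * up_down_scale
    state_dict['lora_down.weight'] = state_dict['lora_down.weight'] * up_down_scale
    return state_dict

# usage sketch
sd = {'lora_up.weight': torch.randn(16, 4), 'lora_down.weight': torch.randn(4, 16)}
sd = apply_module_scale(sd, normalize_scaler=2.0, target_normalize_scaler=1.0)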


@@ -35,6 +35,8 @@ def get_optimizer(
         if use_lr < 0.1:
             # dadaptation uses different lr that is values of 0.1 to 1.0. default to 1.0
             use_lr = 1.0
+
+        print(f"Using lr {use_lr}")
         # let net be the neural network you want to train
         # you can choose weight decay value based on your problem, 0 by default
         optimizer = Prodigy(params, lr=use_lr, **optimizer_params)
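
Note on this hunk: per the existing comment, Prodigy (like D-Adaptation) treats lr as a multiplier in roughly the 0.1 to 1.0 range rather than an Adam-style step size, so a small configured value is bumped to 1.0, and the new print makes that substitution visible in the logs. A minimal usage sketch, assuming the prodigyopt package and a toy model standing in for the real network; values are illustrative.

import torch
from prodigyopt import Prodigy  # pip install prodigyopt

net = torch.nn.Linear(16, 16)   # stand-in for the trainable parameters

use_lr = 1e-4                   # a typical Adam-style lr from a config
if use_lr < 0.1:
    # Prodigy expects lr to act as a multiplier around 0.1 to 1.0; default to 1.0
    use_lr = 1.0
print(f"Using lr {use_lr}")

optimizer = Prodigy(net.parameters(), lr=use_lr, weight_decay=0.0)

# one training step
loss = net(torch.randn(4, 16)).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()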