Fixed issue where adapters were not providing gradients when using the new grad activator

This commit is contained in:
Jaret Burkett
2024-10-29 14:22:10 -06:00
parent 22cd40d7b9
commit 4747716867
3 changed files with 25 additions and 16 deletions

View File

@@ -1161,13 +1161,13 @@ class IPAdapter(torch.nn.Module):
# when training just scaler, we do not train anything else
if not self.config.train_scaler:
param_groups.append({
"params": self.get_non_scaler_parameters(),
"params": list(self.get_non_scaler_parameters()),
"lr": adapter_lr,
})
if self.config.train_scaler or self.config.merge_scaler:
scaler_lr = adapter_lr if self.config.scaler_lr is None else self.config.scaler_lr
param_groups.append({
"params": self.get_scaler_parameters(),
"params": list(self.get_scaler_parameters()),
"lr": scaler_lr,
})
return param_groups