Fixed a bug where sampling would fail when merging in a LoRA for unquantized models. Quantize non-ARA modules as uint8 when using an ARA.

This commit is contained in:
Jaret Burkett
2025-08-25 09:21:40 -06:00
parent f48d21caee
commit ea01a1c7d0
2 changed files with 14 additions and 1 deletions

View File

@@ -1145,6 +1145,8 @@ class StableDiffusion:
# the network to drastically speed up inference
unique_network_weights = set([x.network_multiplier for x in image_configs])
if len(unique_network_weights) == 1 and network.can_merge_in:
# make sure it is on device before merging.
self.unet.to(self.device_torch)
can_merge_in = True
merge_multiplier = unique_network_weights.pop()
network.merge_in(merge_weight=merge_multiplier)