diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index b5ffd219..4bd65111 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -266,10 +266,18 @@ def get_torch_device_name(device):
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
 
 try:
-    print("Device:", get_torch_device_name(get_torch_device()))
+    torch_device_name = get_torch_device_name(get_torch_device())
+    print("Device:", torch_device_name)
 except:
+    torch_device_name = ''
     print("Could not pick default device.")
 
+if 'rtx' in torch_device_name.lower():
+    if not args.pin_shared_memory:
+        print('Hint: your device supports --pin-shared-memory for potential speed improvements.')
+    if not args.cuda_malloc:
+        print('Hint: your device supports --cuda-malloc for potential speed improvements.')
+
 print("VAE dtype:", VAE_DTYPE)
 
 current_loaded_models = []