author | brkirch <brkirch@users.noreply.github.com> | 2022-10-11 03:55:48 +0000
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2022-10-11 14:24:00 +0000
commit | 98fd5cde72d5bda1620ab78416c7828fdc3dc10b (patch)
tree | e618b7844c85f153e7ac7798425a5ca360f372d8 /modules/sd_hijack.py
parent | c0484f1b986ce7acb0e3596f6089a191279f5442 (diff)
Add check for psutil
Diffstat (limited to 'modules/sd_hijack.py')
-rw-r--r-- | modules/sd_hijack.py | 10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 5a1b167f..ac70f876 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -10,6 +10,7 @@ from torch.nn.functional import silu
 import modules.textual_inversion.textual_inversion
 from modules import prompt_parser, devices, sd_hijack_optimizations, shared
 from modules.shared import opts, device, cmd_opts
+from modules.sd_hijack_optimizations import invokeAI_mps_available
 
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model
@@ -31,8 +32,13 @@ def apply_optimizations():
         print("Applying v1 cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
     elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
-        print("Applying cross attention optimization (InvokeAI).")
-        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
+        if not invokeAI_mps_available and shared.device.type == 'mps':
+            print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
+            print("Applying v1 cross attention optimization.")
+            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
+        else:
+            print("Applying cross attention optimization (InvokeAI).")
+            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
     elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
         print("Applying cross attention optimization (Doggettx).")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
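
The companion change defining invokeAI_mps_available lives in modules/sd_hijack_optimizations.py, which is outside this diffstat filter and therefore not shown. A minimal sketch of how such a flag could be derived, assuming it only needs to record whether the optional psutil dependency is importable (the helper name check_for_psutil is hypothetical, not confirmed by this diff):

# modules/sd_hijack_optimizations.py (sketch, not the verbatim upstream code)
import importlib.util

def check_for_psutil() -> bool:
    # Report whether psutil can be imported, without importing it;
    # find_spec returns None when the package is absent.
    return importlib.util.find_spec("psutil") is not None

# Module-level flag consumed by modules/sd_hijack.py above; per the message
# printed in the patch, the InvokeAI MPS code path requires psutil.
invokeAI_mps_available = check_for_psutil()

With a flag like this in place, a machine where shared.device.type == 'mps' but psutil is missing falls back to the v1 cross attention optimization instead of failing inside the InvokeAI path at runtime.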